language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py | {
"start": 64280,
"end": 64679
} | class ____(Phi3PreTrainedModel):
input_modalities = ("image", "audio", "text")
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, Phi4MultimodalImageEmbedding):
init.zeros_(module.global_img_feature_extensor)
init.zeros_(module.sub_img_feature_extensor)
| Phi4MultimodalPreTrainedModel |
python | anthropics__anthropic-sdk-python | tests/api_resources/beta/messages/test_batches.py | {
"start": 18264,
"end": 36818
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_create(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.create(
requests=[
{
"custom_id": "my-custom-id-1",
"params": {
"max_tokens": 1024,
"messages": [
{
"content": "Hello, world",
"role": "user",
}
],
"model": "claude-sonnet-4-5-20250929",
},
}
],
)
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.create(
requests=[
{
"custom_id": "my-custom-id-1",
"params": {
"max_tokens": 1024,
"messages": [
{
"content": "Hello, world",
"role": "user",
}
],
"model": "claude-sonnet-4-5-20250929",
"container": {
"id": "id",
"skills": [
{
"skill_id": "x",
"type": "anthropic",
"version": "x",
}
],
},
"context_management": {
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
"mcp_servers": [
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
"metadata": {"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
"output_config": {"effort": "low"},
"output_format": {
"schema": {"foo": "bar"},
"type": "json_schema",
},
"service_tier": "auto",
"stop_sequences": ["string"],
"stream": False,
"system": [
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
"temperature": 1,
"thinking": {
"budget_tokens": 1024,
"type": "enabled",
},
"tool_choice": {
"type": "auto",
"disable_parallel_tool_use": True,
},
"tools": [
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
"top_k": 5,
"top_p": 0.7,
},
}
],
betas=["string"],
)
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_raw_response_create(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.batches.with_raw_response.create(
requests=[
{
"custom_id": "my-custom-id-1",
"params": {
"max_tokens": 1024,
"messages": [
{
"content": "Hello, world",
"role": "user",
}
],
"model": "claude-sonnet-4-5-20250929",
},
}
],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = response.parse()
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_streaming_response_create(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.batches.with_streaming_response.create(
requests=[
{
"custom_id": "my-custom-id-1",
"params": {
"max_tokens": 1024,
"messages": [
{
"content": "Hello, world",
"role": "user",
}
],
"model": "claude-sonnet-4-5-20250929",
},
}
],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = await response.parse()
assert_matches_type(BetaMessageBatch, batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.retrieve(
message_batch_id="message_batch_id",
)
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.retrieve(
message_batch_id="message_batch_id",
betas=["string"],
)
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.batches.with_raw_response.retrieve(
message_batch_id="message_batch_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = response.parse()
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.batches.with_streaming_response.retrieve(
message_batch_id="message_batch_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = await response.parse()
assert_matches_type(BetaMessageBatch, batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncAnthropic) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_batch_id` but received ''"):
await async_client.beta.messages.batches.with_raw_response.retrieve(
message_batch_id="",
)
@parametrize
async def test_method_list(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.list()
assert_matches_type(AsyncPage[BetaMessageBatch], batch, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.list(
after_id="after_id",
before_id="before_id",
limit=1,
betas=["string"],
)
assert_matches_type(AsyncPage[BetaMessageBatch], batch, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.batches.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = response.parse()
assert_matches_type(AsyncPage[BetaMessageBatch], batch, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.batches.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = await response.parse()
assert_matches_type(AsyncPage[BetaMessageBatch], batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_delete(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.delete(
message_batch_id="message_batch_id",
)
assert_matches_type(BetaDeletedMessageBatch, batch, path=["response"])
@parametrize
async def test_method_delete_with_all_params(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.delete(
message_batch_id="message_batch_id",
betas=["string"],
)
assert_matches_type(BetaDeletedMessageBatch, batch, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.batches.with_raw_response.delete(
message_batch_id="message_batch_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = response.parse()
assert_matches_type(BetaDeletedMessageBatch, batch, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.batches.with_streaming_response.delete(
message_batch_id="message_batch_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = await response.parse()
assert_matches_type(BetaDeletedMessageBatch, batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncAnthropic) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_batch_id` but received ''"):
await async_client.beta.messages.batches.with_raw_response.delete(
message_batch_id="",
)
@parametrize
async def test_method_cancel(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.cancel(
message_batch_id="message_batch_id",
)
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@parametrize
async def test_method_cancel_with_all_params(self, async_client: AsyncAnthropic) -> None:
batch = await async_client.beta.messages.batches.cancel(
message_batch_id="message_batch_id",
betas=["string"],
)
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.batches.with_raw_response.cancel(
message_batch_id="message_batch_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = response.parse()
assert_matches_type(BetaMessageBatch, batch, path=["response"])
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.batches.with_streaming_response.cancel(
message_batch_id="message_batch_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
batch = await response.parse()
assert_matches_type(BetaMessageBatch, batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_cancel(self, async_client: AsyncAnthropic) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_batch_id` but received ''"):
await async_client.beta.messages.batches.with_raw_response.cancel(
message_batch_id="",
)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("async_client", [False], indirect=True)
async def test_method_results(self, async_client: AsyncAnthropic, respx_mock: MockRouter) -> None:
respx_mock.get("/v1/messages/batches/message_batch_id?beta=true").mock(
return_value=httpx.Response(
200, json={"results_url": "/v1/messages/batches/message_batch_id/results?beta=true"}
)
)
respx_mock.get("/v1/messages/batches/message_batch_id/results?beta=true").mock(
return_value=httpx.Response(
200, content="\n".join([json.dumps({"foo": "bar"}), json.dumps({"bar": "baz"})])
)
)
results = await async_client.beta.messages.batches.results(
message_batch_id="message_batch_id",
)
assert results.http_response is not None
assert not results.http_response.is_stream_consumed
i = -1
async for result in results:
i += 1
if i == 0:
assert result.to_dict() == {"foo": "bar"}
elif i == 1:
assert result.to_dict() == {"bar": "baz"}
else:
raise RuntimeError(f"iterated too many times, expected 2 times but got {i + 1}")
assert i == 1
assert results.http_response.is_stream_consumed
@pytest.mark.skip(reason="Prism doesn't support application/x-jsonl responses")
@parametrize
async def test_path_params_results(self, async_client: AsyncAnthropic) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_batch_id` but received ''"):
await async_client.beta.messages.batches.results(
message_batch_id="",
)
| TestAsyncBatches |
python | doocs__leetcode | solution/0000-0099/0091.Decode Ways/Solution.py | {
"start": 0,
"end": 321
} | class ____:
def numDecodings(self, s: str) -> int:
n = len(s)
f = [1] + [0] * n
for i, c in enumerate(s, 1):
if c != "0":
f[i] = f[i - 1]
if i > 1 and s[i - 2] != "0" and int(s[i - 2 : i]) <= 26:
f[i] += f[i - 2]
return f[n]
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 580027,
"end": 580801
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for DiscussionPollOption."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("DiscussionPollOptionEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("DiscussionPollOption"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| DiscussionPollOptionConnection |
python | scipy__scipy | benchmarks/benchmarks/signal.py | {
"start": 1166,
"end": 2247
} | class ____(Benchmark):
param_names = ['mode', 'boundary']
params = [
['full', 'valid', 'same'],
['fill', 'wrap', 'symm']
]
def setup(self, mode, boundary):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = []
for ma, na, mb, nb in product((8, 13, 30, 36), repeat=4):
a = rng.standard_normal((ma, na))
b = rng.standard_normal((mb, nb))
pairs.append((a, b))
self.pairs = pairs
def time_convolve2d(self, mode, boundary):
for a, b in self.pairs:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.convolve2d(a, b, mode=mode, boundary=boundary)
def time_correlate2d(self, mode, boundary):
for a, b in self.pairs:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.correlate2d(a, b, mode=mode, boundary=boundary)
| Convolve2D |
python | ray-project__ray | python/ray/_private/metrics_agent.py | {
"start": 5083,
"end": 8383
} | class ____:
def __init__(self, name: str, desc: str, unit: str, label_keys: List[str]):
"""Represents the OpenCensus metrics that will be proxy exported."""
self._name = name
self._desc = desc
self._unit = unit
# -- The label keys of the metric --
self._label_keys = label_keys
# -- The data that needs to be proxy exported --
# tuple of label values -> data (OpenCesnsus Aggregation data)
self._data = {}
@property
def name(self):
return self._name
@property
def desc(self):
return self._desc
@property
def unit(self):
return self._unit
@property
def label_keys(self):
return self._label_keys
@property
def data(self):
return self._data
def is_distribution_aggregation_data(self):
"""Check if the metric is a distribution aggreation metric."""
return len(self._data) > 0 and isinstance(
next(iter(self._data.values())), DistributionAggregationData
)
def add_data(self, label_values: Tuple, data: Any):
"""Add the data to the metric.
Args:
label_values: The label values of the metric.
data: The data to be added.
"""
self._data[label_values] = data
def record(self, metric: Metric):
"""Parse the Opencensus Protobuf and store the data.
The data can be accessed via `data` API once recorded.
"""
timeseries = metric.timeseries
if len(timeseries) == 0:
return
# Create the aggregation and fill it in the our stats
for series in timeseries:
labels = tuple(val.value for val in series.label_values)
# Aggregate points.
for point in series.points:
if (
metric.metric_descriptor.type
== MetricDescriptorType.CUMULATIVE_INT64
):
data = CountAggregationData(point.int64_value)
elif (
metric.metric_descriptor.type
== MetricDescriptorType.CUMULATIVE_DOUBLE
):
data = SumAggregationData(ValueDouble, point.double_value)
elif metric.metric_descriptor.type == MetricDescriptorType.GAUGE_DOUBLE:
data = LastValueAggregationData(ValueDouble, point.double_value)
elif (
metric.metric_descriptor.type
== MetricDescriptorType.CUMULATIVE_DISTRIBUTION
):
dist_value = point.distribution_value
counts_per_bucket = [bucket.count for bucket in dist_value.buckets]
bucket_bounds = dist_value.bucket_options.explicit.bounds
data = DistributionAggregationData(
dist_value.sum / dist_value.count,
dist_value.count,
dist_value.sum_of_squared_deviation,
counts_per_bucket,
bucket_bounds,
)
else:
raise ValueError("Summary is not supported")
self._data[labels] = data
| OpencensusProxyMetric |
python | python-poetry__poetry | src/poetry/plugins/plugin_manager.py | {
"start": 3034,
"end": 11109
} | class ____:
PATH = Path(".poetry") / "plugins"
def __init__(self, poetry: Poetry, io: IO) -> None:
self._poetry = poetry
self._io = io
self._path = poetry.pyproject_path.parent / self.PATH
self._config_file = self._path / "config.toml"
self._gitignore_file = self._path.parent / ".gitignore"
@property
def _plugin_section(self) -> dict[str, Any]:
plugins = self._poetry.local_config.get("requires-plugins", {})
assert isinstance(plugins, dict)
return plugins
@cached_property
def _config(self) -> dict[str, Any]:
return {
"python": sys.version,
"poetry": __version__,
"plugins-hash": hashlib.sha256(
json.dumps(self._plugin_section, sort_keys=True).encode()
).hexdigest(),
}
def ensure_plugins(self) -> None:
from poetry.factory import Factory
# parse project plugins
plugins = []
for name, constraints in self._plugin_section.items():
_constraints = (
constraints if isinstance(constraints, list) else [constraints]
)
for _constraint in _constraints:
plugins.append(Factory.create_dependency(name, _constraint))
if not plugins:
if self._path.exists():
self._io.write_line(
"<info>No project plugins defined."
" Removing the project's plugin cache</info>"
)
self._io.write_line("")
shutil.rmtree(self._path)
return
if self._is_fresh():
if self._io.is_debug():
self._io.write_line("The project's plugin cache is up to date.")
self._io.write_line("")
return
elif self._path.exists():
self._io.write_line(
"Removing the project's plugin cache because it is outdated"
)
# Just remove the cache for two reasons:
# 1. Since the path of the cache has already been added to sys.path
# at this point, we had to distinguish between packages installed
# directly into Poetry's env and packages installed in the project cache.
# 2. Updating packages in the cache does not work out of the box,
# probably, because we use pip to uninstall and pip does not know
# about the cache so that we end up with just overwriting installed
# packages and multiple dist-info folders per package.
# In sum, we keep it simple by always starting from an empty cache
# if something has changed.
shutil.rmtree(self._path)
# determine plugins relevant for Poetry's environment
poetry_env = EnvManager.get_system_env(naive=True)
relevant_plugins = {
plugin.name: plugin
for plugin in plugins
if plugin.marker.validate(poetry_env.marker_env)
}
if not relevant_plugins:
if self._io.is_debug():
self._io.write_line(
"No relevant project plugins for Poetry's environment defined."
)
self._io.write_line("")
self._write_config()
return
self._io.write_line(
"<info>Ensuring that the Poetry plugins required"
" by the project are available...</info>"
)
# check if required plugins are already available
missing_plugin_count = len(relevant_plugins)
satisfied_plugins = set()
insufficient_plugins = []
installed_packages = []
installed_repo = InstalledRepository.load(poetry_env)
for package in installed_repo.packages:
if required_plugin := relevant_plugins.get(package.name):
if package.satisfies(required_plugin):
satisfied_plugins.add(package.name)
installed_packages.append(package)
else:
insufficient_plugins.append((package, required_plugin))
# Do not add the package to installed_packages so that
# the solver does not consider it.
missing_plugin_count -= 1
if missing_plugin_count == 0:
break
else:
installed_packages.append(package)
if missing_plugin_count == 0 and not insufficient_plugins:
# all required plugins are installed and satisfy the requirements
self._write_config()
self._io.write_line(
"All required plugins have already been installed"
" in Poetry's environment."
)
self._io.write_line("")
return
if insufficient_plugins and self._io.is_debug():
plugins_str = "\n".join(
f" - {req}\n installed: {p}" for p, req in insufficient_plugins
)
self._io.write_line(
"The following Poetry plugins are required by the project"
f" but are not satisfied by the installed versions:\n{plugins_str}"
)
# install missing plugins
missing_plugins = [
plugin
for name, plugin in relevant_plugins.items()
if name not in satisfied_plugins
]
plugins_str = "\n".join(f" - {p}" for p in missing_plugins)
self._io.write_line(
"The following Poetry plugins are required by the project"
f" but are not installed in Poetry's environment:\n{plugins_str}\n"
f"Installing Poetry plugins only for the current project..."
)
self._install(missing_plugins, poetry_env, installed_packages)
self._io.write_line("")
self._write_config()
def _is_fresh(self) -> bool:
if not self._config_file.exists():
return False
with self._config_file.open("rb") as f:
stored_config = tomllib.load(f)
return stored_config == self._config
def _install(
self,
plugins: Sequence[Dependency],
poetry_env: Env,
locked_packages: Sequence[Package],
) -> None:
project = ProjectPackage(name="poetry-project-instance", version="0")
project.python_versions = ".".join(str(v) for v in poetry_env.version_info[:3])
# consider all packages in Poetry's environment pinned
for package in locked_packages:
project.add_dependency(package.to_dependency())
# add missing plugin dependencies
for dependency in plugins:
project.add_dependency(dependency)
# force new package to be installed in the project cache instead of Poetry's env
poetry_env.set_paths(purelib=self._path, platlib=self._path)
self._ensure_cache_directory()
installer = Installer(
self._io,
poetry_env,
project,
Locker(self._path / "poetry.lock", {}),
self._poetry.pool,
self._poetry.config,
# Build installed repository from locked packages so that plugins
# that may be overwritten are not included.
InstalledRepository(locked_packages),
)
installer.update(True)
if installer.run() != 0:
raise RuntimeError("Failed to install required Poetry plugins")
def _write_config(self) -> None:
self._ensure_cache_directory()
document = tomlkit.document()
for key, value in self._config.items():
document[key] = value
TOMLFile(self._config_file).write(data=document)
def _ensure_cache_directory(self) -> None:
if self._path.exists():
return
self._path.mkdir(parents=True, exist_ok=True)
# only write .gitignore if path did not exist before
self._gitignore_file.write_text("*", encoding="utf-8")
| ProjectPluginCache |
python | tensorflow__tensorflow | tensorflow/compiler/tests/cast_test.py | {
"start": 891,
"end": 4151
} | class ____(xla_test.XLATestCase):
def test_cast(self):
types = {
dtypes.bool,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.int32,
dtypes.int64,
dtypes.uint32,
dtypes.uint64,
}
with self.session() as session:
for src_type in types:
for dst_type in types:
self._test_cast(src_type, dst_type, session)
def test_cast_fp8(self):
if platform.system() == "Darwin":
# TODO(b/271327511): Fix issue where casts to FP8 very rarely result in
# NaN on Mac
self.skipTest("Casts to FP8 sometimes result in NaN on Mac")
fp8_types = {
dtypes.float8_e5m2,
dtypes.float8_e4m3fn,
dtypes.float8_e4m3fnuz,
dtypes.float8_e4m3b11fnuz,
dtypes.float8_e5m2fnuz,
}
other_types = {
dtypes.bool,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.int32,
dtypes.int64,
dtypes.uint32,
dtypes.uint64,
}
with self.session() as session:
for fp8_type in fp8_types:
for other_type in other_types | fp8_types:
self._test_cast(fp8_type, other_type, session)
self._test_cast(other_type, fp8_type, session)
def _test_cast(self, src_type, dst_type, session):
with self.subTest(src_type=src_type, dst_type=dst_type):
shapes = [[], [4], [2, 3], [2, 0, 4]]
src_np_dtype = src_type.as_numpy_dtype
dst_np_dtype = dst_type.as_numpy_dtype
for shape in shapes:
src = np.arange(np.prod(shape)).astype(src_np_dtype)
if src_type in self.complex_tf_types:
src += (np.arange(np.prod(shape)) * 2j).astype(src_np_dtype)
src = src.reshape(shape)
dst = src.astype(dst_np_dtype)
self.assert_op_output_matches_expected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst,
local_session=session,
)
# Check special values.
if src_type.is_integer:
imin = np.iinfo(src_np_dtype).min
imax = np.iinfo(src_np_dtype).max
if src_type.is_unsigned:
src = np.array([imin, imax, 0, 1], dtype=src_np_dtype)
else:
src = np.array([imin, imax, 0, 1, -1], dtype=src_np_dtype)
elif src_type in self.float_tf_types:
if dst_type.is_integer:
imin = np.iinfo(dst_np_dtype).min
imax = np.iinfo(dst_np_dtype).max // 2
src = np.array([imin, imax, 0, 1], dtype=src_np_dtype)
elif dst_type in self.float_tf_types:
fmin = np.finfo(dst_np_dtype).min
fmax = np.finfo(dst_np_dtype).max
tiny = np.finfo(dst_np_dtype).tiny
eps = np.finfo(dst_np_dtype).eps
src = np.array(
[fmin, fmax, np.nan, eps, -eps, tiny, -tiny, np.inf, -np.inf],
dtype=src_np_dtype,
)
dst = src.astype(dst_np_dtype)
self.assert_op_output_matches_expected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst,
local_session=session,
)
def test_give_me_a_name(self):
pass
if __name__ == "__main__":
googletest.main()
| CastTest |
python | docker__docker-py | docker/api/network.py | {
"start": 123,
"end": 10672
} | class ____:
def networks(self, names=None, ids=None, filters=None):
"""
List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
(dict): List of network objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None,
ingress=None):
"""
Create a network. Similar to the ``docker network create``.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'Options': options,
'IPAM': ipam,
'CheckDuplicate': check_duplicate,
}
if labels is not None:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'network labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'enable_ipv6 was introduced in API 1.23'
)
data['EnableIPv6'] = True
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
'supported in API version < 1.22')
data['Internal'] = True
if attachable is not None:
if version_lt(self._version, '1.24'):
raise InvalidVersion(
'attachable is not supported in API version < 1.24'
)
data['Attachable'] = attachable
if ingress is not None:
if version_lt(self._version, '1.29'):
raise InvalidVersion(
'ingress is not supported in API version < 1.29'
)
data['Ingress'] = ingress
if scope is not None:
if version_lt(self._version, '1.30'):
raise InvalidVersion(
'scope is not supported in API version < 1.30'
)
data['Scope'] = scope
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@minimum_version('1.25')
def prune_networks(self, filters=None):
"""
Delete unused networks
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted network names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/networks/prune')
return self._result(self._post(url, params=params), True)
@check_resource('net_id')
def remove_network(self, net_id):
"""
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@check_resource('net_id')
def inspect_network(self, net_id, verbose=None, scope=None):
"""
Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
"""
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
if scope is not None:
if version_lt(self._version, '1.31'):
raise InvalidVersion('scope was introduced in API 1.31')
params['scope'] = scope
url = self._url("/networks/{0}", net_id)
res = self._get(url, params=params)
return self._result(res, json=True)
@check_resource('container')
def connect_container_to_network(self, container, net_id,
                                 ipv4_address=None, ipv6_address=None,
                                 aliases=None, links=None,
                                 link_local_ips=None, driver_opt=None,
                                 mac_address=None):
    """
    Connect a container to a network.

    Args:
        container (str): container-id/name to be connected to the network
        net_id (str): network id
        aliases (:py:class:`list`): A list of aliases for this endpoint.
            Names in that list can be used within the network to reach the
            container. Defaults to ``None``.
        links (:py:class:`list`): A list of links for this endpoint.
            Containers declared in this list will be linked to this
            container. Defaults to ``None``.
        ipv4_address (str): The IP address of this container on the
            network, using the IPv4 protocol. Defaults to ``None``.
        ipv6_address (str): The IP address of this container on the
            network, using the IPv6 protocol. Defaults to ``None``.
        link_local_ips (:py:class:`list`): A list of link-local
            (IPv4/IPv6) addresses.
        driver_opt (dict): A dictionary of options to provide to the
            network driver. Defaults to ``None``.
        mac_address (str): The MAC address of this container on the
            network. Defaults to ``None``.
    """
    # All endpoint-level settings are folded into a single
    # EndpointConfig object alongside the container reference.
    data = {
        "Container": container,
        "EndpointConfig": self.create_endpoint_config(
            aliases=aliases, links=links, ipv4_address=ipv4_address,
            ipv6_address=ipv6_address, link_local_ips=link_local_ips,
            driver_opt=driver_opt,
            mac_address=mac_address
        ),
    }
    url = self._url("/networks/{0}/connect", net_id)
    res = self._post_json(url, data=data)
    self._raise_for_status(res)
@check_resource('container')
def disconnect_container_from_network(self, container, net_id,
                                      force=False):
    """
    Disconnect a container from a network.

    Args:
        container (str): container ID or name to be disconnected from the
            network
        net_id (str): network ID
        force (bool): Force the container to disconnect from a network.
            Default: ``False``
    """
    payload = {"Container": container}
    if force:
        # Forced disconnect only exists on newer daemons; reject early
        # rather than sending a field the server would not understand.
        if version_lt(self._version, '1.22'):
            raise InvalidVersion(
                'Forced disconnect was introduced in API 1.22'
            )
        payload['Force'] = force
    response = self._post_json(
        self._url("/networks/{0}/disconnect", net_id), data=payload
    )
    self._raise_for_status(response)
| NetworkApiMixin |
python | plotly__plotly.py | plotly/graph_objs/scattermap/marker/_colorbar.py | {
"start": 233,
"end": 61703
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermap.marker"
_path_str = "scattermap.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.scattermap.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.scattermap.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.scattermap.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.scattermap.mar
ker.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
scattermap.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.scattermap.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.scattermap.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scattermap.mark
er.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
rmap.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scattermap.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scattermap.marker.colorbar
.Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermap.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scattermap.mark
er.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
rmap.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scattermap.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scattermap.marker.colorbar
.Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermap.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermap.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 30751,
"end": 31289
} | class ____(TestBasicOps, TestCase):
def setUp(self):
super().setUp()
self.case = "unit OrderedSet (number)"
self.values = [3]
self.OrderedSet = OrderedSet(self.values)
self.dup = OrderedSet(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.OrderedSet)
def test_not_in(self):
self.assertNotIn(2, self.OrderedSet)
# ------------------------------------------------------------------------------
| TestBasicOpsSingleton |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/resources/pythonic_resources.py | {
"start": 9909,
"end": 10123
} | class ____(AbstractContextManager):
def execute(self, query: str):
return None
def __enter__(self) -> "Connection":
return self
def __exit__(self, *args):
return False
| Connection |
python | pytorch__pytorch | test/distributed/pipelining/test_schedule.py | {
"start": 15290,
"end": 16687
} | class ____(TestCase):
@parametrize(
"ScheduleClass,csv_name",
[
(ScheduleDualPipeV, "dualpipev_4rank_10mb"),
],
)
def test_csv_compare(self, ScheduleClass, csv_name):
"""
Test that schedules matches the expected CSV. This is a regression test to ensure that the schedule
is not changed unintentionally.
"""
num_local_stages = 2
group_size = 4
num_stages = num_local_stages * group_size
stages = [
MockPipelineStage(group_size=group_size, num_stages=num_stages)
for _ in range(num_local_stages)
]
num_microbatches = 10
schedule = ScheduleClass(stages, num_microbatches)
comms_csv = os.path.join(ARTIFACTS_DIR, f"{csv_name}.csv")
sch = schedule.pipeline_order
# Uncomment to regenerate reference output
# schedule._dump_csv("test.csv", "compute_only")
sch_ref = {}
with open(comms_csv, newline="") as ref:
for rank, row in enumerate(csv.reader(ref)):
sch_ref[rank] = [_Action.from_str(s) for s in row]
for rank in sch_ref:
for timestep, (a, b) in enumerate(zip(sch[rank], sch_ref[rank])):
self.assertEqual(a, b, f"Mismatch at {timestep=}, {a=}, expected {b}")
instantiate_parametrized_tests(TestScheduleCsv)
| TestScheduleCsv |
python | ansible__ansible | test/units/module_utils/facts/virtual/test_hpux.py | {
"start": 284,
"end": 2202
} | class ____(HPUXVirtual):
def __init__(self, module):
self.module = module
def mock_path_exists_vecheck(filename):
return filename in ("/usr/sbin/vecheck",)
def mock_path_exists_hpvminfo(filename):
return filename in ("/opt/hpvm/bin/hpvminfo",)
def mock_path_exists_parstatus(filename):
return filename in ("/usr/sbin/parstatus",)
@pytest.mark.parametrize(
("mock_method", "expected_type", "mock_output", "expected_guest"),
[
pytest.param(
mock_path_exists_vecheck,
"guest",
"",
"HP vPar",
id="HP vPar",
),
pytest.param(
mock_path_exists_hpvminfo,
"guest",
"Running HPVM vPar",
"HPVM vPar",
id="HPVM vPar",
),
pytest.param(
mock_path_exists_hpvminfo,
"guest",
"Running HPVM guest",
"HPVM IVM",
id="HPVM IVM",
),
pytest.param(
mock_path_exists_hpvminfo,
"host",
"Running HPVM host",
"HPVM",
id="HPVM",
),
pytest.param(
mock_path_exists_parstatus,
"guest",
"",
"HP nPar",
id="HP nPar",
),
],
)
def test_get_virtual_facts_hpvpar(mocker, mock_method, expected_type, mock_output, expected_guest):
mocker.patch("os.path.exists", side_effect=mock_method)
module = mocker.Mock()
module.run_command.return_value = (0, mock_output, "")
mixin = MockVirtualSysctl(module=module)
guest_facts = mixin.get_virtual_facts()
expected = {
"virtualization_role": expected_guest,
"virtualization_tech_guest": set([expected_guest]),
"virtualization_tech_host": set(),
"virtualization_type": expected_type,
}
assert guest_facts == expected
| MockVirtualSysctl |
python | kamyu104__LeetCode-Solutions | Python/find-building-where-alice-and-bob-can-meet.py | {
"start": 84,
"end": 2166
} | class ____(object):
def leftmostBuildingQueries(self, heights, queries):
"""
:type heights: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
# Range Maximum Query
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: None,
query_fn=lambda x, y: max(x, y)):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.build_fn = build_fn
self.query_fn = query_fn
self.build(0, N-1, 1)
def build(self, left, right, idx):
if left == right:
self.tree[idx] = self.build_fn(left)
return
mid = left + (right-left)//2
self.build(left, mid, idx*2)
self.build(mid+1, right, idx*2+1)
self.tree[idx] = self.query_fn(self.tree[idx*2], self.tree[idx*2+1])
def binary_search(self, L, R, left, right, idx, h):
if right < L or left > R:
return -1
if L <= left and right <= R:
if not self.tree[idx] > h:
return -1
if left == right:
return left
mid = left + (right-left)//2
i = self.binary_search(L, R, left, mid, idx*2, h)
return i if i != -1 else self.binary_search(L, R, mid+1, right, idx*2+1, h)
def build(i):
return heights[i]
result = [-1]*len(queries)
st = SegmentTree(len(heights), build_fn=build)
for i, (a, b) in enumerate(queries):
if a > b:
a, b = b, a
if a == b or heights[a] < heights[b]:
result[i] = b
continue
result[i] = st.binary_search(b+1, len(heights)-1, 0, len(heights)-1, 1, heights[a])
return result
# Time: O(n + qlogq)
# Space: O(n + q)
import heapq
# offline solution, heap
| Solution |
python | pypa__warehouse | tests/unit/admin/views/test_prohibited_user_names.py | {
"start": 2083,
"end": 3984
} | class ____:
def test_get(self):
request = pretend.stub(method="GET")
assert views.bulk_add_prohibited_user_names(request) == {}
def test_bulk_add(self, db_request):
db_request.user = UserFactory.create()
db_request.method = "POST"
already_existing_prohibition = ProhibitedUserName(
name="prohibition-already-exists",
prohibited_by=db_request.user,
comment="comment",
)
db_request.db.add(already_existing_prohibition)
already_existing_user = UserFactory.create(username="user-already-exists")
UserFactory.create(username="deleted-user")
user_names = [
already_existing_prohibition.name,
already_existing_user.username,
"doesnt-already-exist",
]
db_request.POST["users"] = "\n".join(user_names)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = lambda a: "/admin/prohibited_user_names/bulk"
result = views.bulk_add_prohibited_user_names(db_request)
assert db_request.session.flash.calls == [
pretend.call(
f"Prohibited {len(user_names)!r} users",
queue="success",
)
]
assert result.status_code == 303
assert result.headers["Location"] == "/admin/prohibited_user_names/bulk"
for user_name in user_names:
prohibition = (
db_request.db.query(ProhibitedUserName)
.filter(ProhibitedUserName.name == user_name)
.one()
)
assert prohibition.name == user_name
assert prohibition.prohibited_by == db_request.user
assert db_request.db.query(User).filter(User.name == user_name).count() == 0
| TestBulkAddProhibitedUserName |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/urllib3/util/retry.py | {
"start": 2775,
"end": 22013
} | class ____(object):
"""Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param int other:
How many times to retry on other errors.
Other errors are errors that are not connect, read, redirect or status errors.
These errors might be raised after the request was sent to the server, so the
request might have side-effects.
Set to ``0`` to fail on the first retry of this type.
If ``total`` is not set, it's a good idea to set this to 0 to account
for unexpected edge cases and avoid infinite retry loops.
:param iterable allowed_methods:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
Set to a ``False`` value to retry on any verb.
.. warning::
Previously this parameter was named ``method_whitelist``, that
usage is deprecated in v1.26.0 and will be removed in v2.0.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``allowed_methods``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
#: Default methods to be used for ``allowed_methods``
DEFAULT_ALLOWED_METHODS = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
#: Default status codes to be used for ``status_forcelist``
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Default headers to be used for ``remove_headers_on_redirect``
DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
#: Maximum backoff time.
DEFAULT_BACKOFF_MAX = 120
def __init__(
    self,
    total=10,
    connect=None,
    read=None,
    redirect=None,
    status=None,
    other=None,
    allowed_methods=_Default,
    status_forcelist=None,
    backoff_factor=0,
    raise_on_redirect=True,
    raise_on_status=True,
    history=None,
    respect_retry_after_header=True,
    remove_headers_on_redirect=_Default,
    # TODO: Deprecated, remove in v2.0
    method_whitelist=_Default,
):
    """Build a retry policy; see the class docstring for each parameter."""
    # Resolve the deprecated 'method_whitelist' alias before anything else.
    if method_whitelist is not _Default:
        if allowed_methods is not _Default:
            raise ValueError(
                "Using both 'allowed_methods' and "
                "'method_whitelist' together is not allowed. "
                "Instead only use 'allowed_methods'"
            )
        warnings.warn(
            "Using 'method_whitelist' with Retry is deprecated and "
            "will be removed in v2.0. Use 'allowed_methods' instead",
            DeprecationWarning,
            stacklevel=2,
        )
        allowed_methods = method_whitelist

    # Fill in class-level defaults for the sentinel-valued parameters.
    if allowed_methods is _Default:
        allowed_methods = self.DEFAULT_ALLOWED_METHODS
    if remove_headers_on_redirect is _Default:
        remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT

    # ``False`` disables redirect handling entirely: zero redirects are
    # followed and exhaustion does not raise for them.
    if redirect is False or total is False:
        redirect = 0
        raise_on_redirect = False

    self.total = total
    self.connect = connect
    self.read = read
    self.redirect = redirect
    self.status = status
    self.other = other
    self.status_forcelist = status_forcelist or set()
    self.allowed_methods = allowed_methods
    self.backoff_factor = backoff_factor
    self.raise_on_redirect = raise_on_redirect
    self.raise_on_status = raise_on_status
    self.history = history or tuple()
    self.respect_retry_after_header = respect_retry_after_header
    # Header names are compared case-insensitively, so normalize once here.
    self.remove_headers_on_redirect = frozenset(
        h.lower() for h in remove_headers_on_redirect
    )
def new(self, **kw):
    """Return a copy of this retry policy with selected fields replaced.

    Keyword arguments override the corresponding attributes of the
    current instance; everything else is carried over unchanged.
    """
    params = {
        "total": self.total,
        "connect": self.connect,
        "read": self.read,
        "redirect": self.redirect,
        "status": self.status,
        "other": self.other,
        "status_forcelist": self.status_forcelist,
        "backoff_factor": self.backoff_factor,
        "raise_on_redirect": self.raise_on_redirect,
        "raise_on_status": self.raise_on_status,
        "history": self.history,
        "remove_headers_on_redirect": self.remove_headers_on_redirect,
        "respect_retry_after_header": self.respect_retry_after_header,
    }

    # Deprecation shim (remove in v2.0): when the caller specified neither
    # name, pass the method collection under whichever key this instance
    # itself uses -- 'method_whitelist' for legacy subclasses that set it
    # directly on the instance, 'allowed_methods' otherwise.
    if "method_whitelist" not in kw and "allowed_methods" not in kw:
        if "method_whitelist" in self.__dict__:
            warnings.warn(
                "Using 'method_whitelist' with Retry is deprecated and "
                "will be removed in v2.0. Use 'allowed_methods' instead",
                DeprecationWarning,
            )
            params["method_whitelist"] = self.allowed_methods
        else:
            params["allowed_methods"] = self.allowed_methods

    params.update(kw)
    return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
    """Backwards-compatibility for the old retries format.

    Accepts an int, ``None`` (use *default* or ``cls.DEFAULT``), or an
    existing :class:`Retry` instance, which is returned unchanged.
    """
    if retries is None:
        retries = cls.DEFAULT if default is None else default
    if isinstance(retries, Retry):
        return retries

    # In the legacy API ``redirect`` was a plain bool; a truthy value maps
    # to ``None`` ("no separate redirect limit"), falsy stays False.
    converted = cls(retries, redirect=bool(redirect) and None)
    log.debug("Converted retries value: %r -> %r", retries, converted)
    return converted
def get_backoff_time(self):
    """Formula for computing the current backoff.

    Counts the most recent run of consecutive non-redirect errors in
    ``history`` and applies ``backoff_factor * 2 ** (count - 1)``,
    capped at :attr:`Retry.DEFAULT_BACKOFF_MAX`.

    :rtype: float
    """
    # Walk the history backwards and stop at the first redirect entry:
    # only the trailing run of genuine errors drives the backoff.
    consecutive = 0
    for entry in reversed(self.history):
        if entry.redirect_location is not None:
            break
        consecutive += 1

    if consecutive <= 1:
        # No delay before the first retry of an error sequence.
        return 0

    computed = self.backoff_factor * (2 ** (consecutive - 1))
    return min(self.DEFAULT_BACKOFF_MAX, computed)
def parse_retry_after(self, retry_after):
    """Parse a ``Retry-After`` header value into a number of seconds.

    Accepts either a bare integer delay (optionally surrounded by
    whitespace, per RFC 7230 section 3.2.4) or an HTTP-date; a date in
    the past yields 0.

    :raises InvalidHeader: if the value is neither form.
    """
    if re.match(r"^\s*[0-9]+\s*$", retry_after):
        return int(retry_after)

    parsed = email.utils.parsedate_tz(retry_after)
    if parsed is None:
        raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)

    if parsed[9] is None:  # Python 2
        # On Python 2.7, parsedate_tz returns None (not 0) for a missing
        # timezone, and mktime_tz would then treat the date as local
        # time.  Force UTC instead.
        parsed = parsed[:9] + (0,) + parsed[10:]

    delay = email.utils.mktime_tz(parsed) - time.time()
    # A date in the past means "retry immediately".
    return max(0, delay)
def get_retry_after(self, response):
    """Get the value of Retry-After in seconds.

    Returns ``None`` when the response carries no ``Retry-After`` header.
    """
    header = response.headers.get("Retry-After")
    return None if header is None else self.parse_retry_after(header)
def sleep_for_retry(self, response=None):
    """Sleep for the duration requested by the response's ``Retry-After``.

    :return: True if a positive delay was present and we slept,
        False otherwise (no header, or a zero delay).
    """
    delay = self.get_retry_after(response)
    if not delay:
        return False
    time.sleep(delay)
    return True
def _sleep_backoff(self):
    """Sleep for the current exponential-backoff interval, if any."""
    delay = self.get_backoff_time()
    if delay > 0:
        time.sleep(delay)
def sleep(self, response=None):
    """Sleep between retry attempts.

    This method will respect a server's ``Retry-After`` response header
    and sleep the duration of the time requested. If that is not present, it
    will use an exponential backoff. By default, the backoff factor is 0 and
    this method will return immediately.
    """
    if self.respect_retry_after_header and response:
        if self.sleep_for_retry(response):
            # The server dictated the wait; skip the backoff entirely.
            return
    self._sleep_backoff()
def _is_connection_error(self, err):
    """Errors when we're fairly sure that the server did not receive the
    request, so it should be safe to retry.
    """
    # A proxy failure wraps the real error; classify the underlying one.
    candidate = err.original_error if isinstance(err, ProxyError) else err
    return isinstance(candidate, ConnectTimeoutError)
def _is_read_error(self, err):
    """Errors that occur after the request has been started, so we should
    assume that the server began processing it.
    """
    return isinstance(err, ReadTimeoutError) or isinstance(err, ProtocolError)
def _is_method_retryable(self, method):
    """Checks if a given HTTP method should be retried upon, depending if
    it is included in the allowed_methods
    """
    # Favor a 'method_whitelist' set directly on the instance (outside our
    # constructor) so legacy subclasses keep working.  Remove in v2.0.
    if "method_whitelist" in self.__dict__:
        warnings.warn(
            "Using 'method_whitelist' with Retry is deprecated and "
            "will be removed in v2.0. Use 'allowed_methods' instead",
            DeprecationWarning,
        )
        allowed = self.method_whitelist
    else:
        allowed = self.allowed_methods

    # An empty or None collection means "retry any method".
    return not allowed or method.upper() in allowed
def is_retry(self, method, status_code, has_retry_after=False):
    """Is this method/status code retryable? (Based on allowlists and control
    variables such as the number of total retries to allow, whether to
    respect the Retry-After header, whether this header is present, and
    whether the returned status code is on the list of status codes to
    be retried upon on the presence of the aforementioned header)
    """
    if not self._is_method_retryable(method):
        return False

    # An explicit forcelist match always retries.
    if self.status_forcelist and status_code in self.status_forcelist:
        return True

    # Otherwise retry only when a Retry-After header accompanies one of
    # the default retry-after status codes (and retries remain).
    retry_after_applies = (
        has_retry_after and status_code in self.RETRY_AFTER_STATUS_CODES
    )
    return self.total and self.respect_retry_after_header and retry_after_applies
def is_exhausted(self):
    """Are we out of retries?"""
    counts = (
        self.total,
        self.connect,
        self.read,
        self.redirect,
        self.status,
        self.other,
    )
    # NOTE: truthiness filtering deliberately drops both None ("no limit")
    # and 0 here; a counter only reads as exhausted once it has gone
    # negative (decremented past zero by increment()).
    active = [count for count in counts if count]
    return bool(active) and min(active) < 0
def increment(
    self,
    method=None,
    url=None,
    response=None,
    error=None,
    _pool=None,
    _stacktrace=None,
):
    """Return a new Retry object with incremented retry counters.

    The failure is classified as exactly one of connect / read / other /
    redirect / status; that counter (and ``total``) is decremented, the
    attempt is recorded in ``history``, and :class:`MaxRetryError` is
    raised once the resulting object is exhausted.

    :param response: A response object, or None, if the server did not
        return a response.
    :type response: :class:`~urllib3.response.HTTPResponse`
    :param Exception error: An error encountered during the request, or
        None if the response was received successfully.

    :return: A new ``Retry`` object.
    """
    if self.total is False and error:
        # Retries fully disabled: surface the original error unchanged.
        raise six.reraise(type(error), error, _stacktrace)

    total = self.total
    if total is not None:
        total -= 1

    # Work on local copies; the new counters are handed to self.new().
    connect = self.connect
    read = self.read
    redirect = self.redirect
    status_count = self.status
    other = self.other
    cause = "unknown"
    status = None
    redirect_location = None

    if error and self._is_connection_error(error):
        # Connect retry?  The server never received the request, so a
        # retry is safe for any method.
        if connect is False:
            raise six.reraise(type(error), error, _stacktrace)
        elif connect is not None:
            connect -= 1
    elif error and self._is_read_error(error):
        # Read retry?  The server may have begun processing, so only
        # retry methods classified as retryable.
        if read is False or not self._is_method_retryable(method):
            raise six.reraise(type(error), error, _stacktrace)
        elif read is not None:
            read -= 1
    elif error:
        # Other retry?  Any error that is neither connect nor read.
        if other is not None:
            other -= 1
    elif response and response.get_redirect_location():
        # Redirect retry?
        if redirect is not None:
            redirect -= 1
        cause = "too many redirects"
        redirect_location = response.get_redirect_location()
        status = response.status
    else:
        # Incrementing because of a server error like a 500 in
        # status_forcelist and the given method is in the allowed_methods
        cause = ResponseError.GENERIC_ERROR
        if response and response.status:
            if status_count is not None:
                status_count -= 1
            cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
            status = response.status

    # Record this attempt so get_backoff_time() can count the trailing
    # run of consecutive errors.
    history = self.history + (
        RequestHistory(method, url, error, status, redirect_location),
    )

    new_retry = self.new(
        total=total,
        connect=connect,
        read=read,
        redirect=redirect,
        status=status_count,
        other=other,
        history=history,
    )

    if new_retry.is_exhausted():
        raise MaxRetryError(_pool, url, error or ResponseError(cause))

    log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

    return new_retry
def __repr__(self):
    """Debug representation showing the principal retry counters."""
    template = (
        "{cls.__name__}(total={self.total}, connect={self.connect}, "
        "read={self.read}, redirect={self.redirect}, status={self.status})"
    )
    return template.format(cls=type(self), self=self)
def __getattr__(self, item):
    """Attribute fallback that keeps the deprecated ``method_whitelist``
    alias readable; only invoked for names not found by normal lookup.
    """
    if item == "method_whitelist":
        # TODO: Remove this deprecated alias in v2.0
        warnings.warn(
            "Using 'method_whitelist' with Retry is deprecated and "
            "will be removed in v2.0. Use 'allowed_methods' instead",
            DeprecationWarning,
        )
        return self.allowed_methods
    try:
        # Fall back to lookup through the superclass proxy; if that also
        # fails, resolve the name on the Retry class itself.
        return getattr(super(Retry, self), item)
    except AttributeError:
        return getattr(Retry, item)
# For backwards compatibility (equivalent to pre-v1.9): a module-level
# default policy of three total retries.
Retry.DEFAULT = Retry(3)
| Retry |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 47417,
"end": 48186
} | class ____(TestCase):
validator = None
def application(self, env, start_response):
try:
start_response('304 Not modified', [('Content-Length', '100')])
except AssertionError as ex:
start_response('200 Raised', [])
return ex.args
raise AssertionError('start_response did not fail but it should')
def test_err(self):
body = "Invalid Content-Length for 304 response: '100' (must be absent or zero)"
with self.makefile() as fd:
fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
read_http(fd, code=200, reason='Raised', body=body, chunks=False)
garbage = fd.read()
self.assertEqual(garbage, b"")
| TestContentLength304 |
python | numba__numba | numba/tests/test_mangling.py | {
"start": 248,
"end": 1262
} | class ____(TestCase):
def test_one_args(self):
fname = 'foo'
argtypes = types.int32,
name = default_mangler(fname, argtypes)
self.assertEqual(name, '_Z3fooi')
def test_two_args(self):
fname = 'foo'
argtypes = types.int32, types.float32
name = default_mangler(fname, argtypes)
self.assertEqual(name, '_Z3fooif')
def test_unicode_fname(self):
fname = u'foಠ'
argtypes = types.int32, types.float32
name = default_mangler(fname, argtypes)
self.assertIsInstance(name, str)
# manually encode it
unichar = fname[2]
enc = ''.join('_{:02x}'.format(c)
for c in unichar.encode('utf8'))
text = 'fo' + enc
expect = '_Z{}{}if'.format(len(text), text)
self.assertEqual(name, expect)
# ensure result chars are in the right charset
self.assertRegex(name, r'^_Z[a-zA-Z0-9_\$]+$')
if __name__ == '__main__':
unittest.main()
| TestMangling |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 41558,
"end": 44920
} | class ____(nn.Module):
"""
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://huggingface.co/papers/1807.10221).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
# PSP Module
self.psp_modules = Data2VecVisionPyramidPoolingModule(
self.pool_scales,
self.in_channels[-1],
self.channels,
align_corners=self.align_corners,
)
self.bottleneck = Data2VecVisionConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = Data2VecVisionConvModule(in_channels, self.channels, kernel_size=1)
fpn_conv = Data2VecVisionConvModule(self.channels, self.channels, kernel_size=3, padding=1)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = Data2VecVisionConvModule(
len(self.in_channels) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
def psp_forward(self, inputs):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(encoder_hidden_states))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
)
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = nn.functional.interpolate(
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
output = self.classifier(output)
return output
# Copied from transformers.models.beit.modeling_beit.BeitFCNHead with Beit->Data2VecVision
| Data2VecVisionUperHead |
python | bokeh__bokeh | src/bokeh/models/widgets/markups.py | {
"start": 3527,
"end": 4063
} | class ____(Markup):
''' A block (div) of text.
This Bokeh model corresponds to an HTML ``<div>`` element.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/interaction/widgets/div.py"
render_as_text = Bool(False, help="""
Whether the contents should be rendered as raw text or as interpreted HTML.
The default value is False, meaning contents are rendered as HTML.
""")
| Div |
python | huggingface__transformers | src/transformers/models/idefics2/modeling_idefics2.py | {
"start": 16954,
"end": 17869
} | class ____(PreTrainedModel):
config: Idefics2Config
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["Idefics2VisionAttention", "Idefics2MLP", "Idefics2PerceiverLayer", "Idefics2DecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Idefics2MultiheadAttentionPoolingHead):
init.normal_(module.probe)
elif isinstance(module, Idefics2PerceiverResampler):
init.ones_(module.latents)
@auto_docstring(
custom_intro="""
Idefics2 vision encoder model that returnss raw image embeddings.
"""
)
| Idefics2PreTrainedModel |
python | sympy__sympy | sympy/functions/elementary/hyperbolic.py | {
"start": 57851,
"end": 64476
} | class ____(InverseHyperbolicFunction):
"""
``asech(x)`` is the inverse hyperbolic secant of ``x``.
The inverse hyperbolic secant function.
Examples
========
>>> from sympy import asech, sqrt, S
>>> from sympy.abc import x
>>> asech(x).diff(x)
-1/(x*sqrt(1 - x**2))
>>> asech(1).diff(x)
0
>>> asech(1)
0
>>> asech(S(2))
I*pi/3
>>> asech(-sqrt(2))
3*I*pi/4
>>> asech((sqrt(6) - sqrt(2)))
I*pi/12
See Also
========
sympy.functions.elementary.hyperbolic.asinh
sympy.functions.elementary.hyperbolic.atanh
sympy.functions.elementary.hyperbolic.cosh
sympy.functions.elementary.hyperbolic.acoth
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
.. [2] https://dlmf.nist.gov/4.37
.. [3] https://functions.wolfram.com/ElementaryFunctions/ArcSech/
"""
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -1/(z*sqrt(1 - z**2))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity or arg is S.NegativeInfinity:
return pi*I / 2
elif arg.is_zero:
return S.Infinity
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return pi*I
if arg.is_number:
cst_table = _asech_table()
if arg in cst_table:
if arg.is_extended_real:
return cst_table[arg]*I
return cst_table[arg]
if arg is S.ComplexInfinity:
from sympy.calculus.accumulationbounds import AccumBounds
return I*AccumBounds(-pi/2, pi/2)
if arg.is_zero:
return S.Infinity
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return log(2 / x)
elif n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2 and n > 2:
p = previous_terms[-2]
return p * ((n - 1)*(n-2)) * x**2/(4 * (n//2)**2)
else:
k = n // 2
R = RisingFactorial(S.Half, k) * n
F = factorial(k) * n // 2 * n // 2
return -1 * R / F * x**n / 4
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
# Handling branch points
if x0 in (-S.One, S.Zero, S.One, S.ComplexInfinity):
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
if x0 is S.NaN:
expr = self.func(arg.as_leading_term(x))
if expr.is_finite:
return expr
else:
return self
# Handling points lying on branch cuts (-oo, 0] U (1, oo)
if x0.is_negative or (1 - x0).is_negative:
ndir = arg.dir(x, cdir if cdir else 1)
if im(ndir).is_positive:
if x0.is_positive or (x0 + 1).is_negative:
return -self.func(x0)
return self.func(x0) - 2*I*pi
elif not im(ndir).is_negative:
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
return self.func(x0)
def _eval_nseries(self, x, n, logx, cdir=0): # asech
from sympy.series.order import O
arg = self.args[0]
arg0 = arg.subs(x, 0)
# Handling branch points
if arg0 is S.One:
t = Dummy('t', positive=True)
ser = asech(S.One - t**2).rewrite(log).nseries(t, 0, 2*n)
arg1 = S.One - self.args[0]
f = arg1.as_leading_term(x)
g = (arg1 - f)/ f
if not g.is_meromorphic(x, 0): # cannot be expanded
return O(1) if n == 0 else O(sqrt(x))
res1 = sqrt(S.One + g)._eval_nseries(x, n=n, logx=logx)
res = (res1.removeO()*sqrt(f)).expand()
return ser.removeO().subs(t, res).expand().powsimp() + O(x**n, x)
if arg0 is S.NegativeOne:
t = Dummy('t', positive=True)
ser = asech(S.NegativeOne + t**2).rewrite(log).nseries(t, 0, 2*n)
arg1 = S.One + self.args[0]
f = arg1.as_leading_term(x)
g = (arg1 - f)/ f
if not g.is_meromorphic(x, 0): # cannot be expanded
return O(1) if n == 0 else I*pi + O(sqrt(x))
res1 = sqrt(S.One + g)._eval_nseries(x, n=n, logx=logx)
res = (res1.removeO()*sqrt(f)).expand()
return ser.removeO().subs(t, res).expand().powsimp() + O(x**n, x)
res = super()._eval_nseries(x, n=n, logx=logx)
if arg0 is S.ComplexInfinity:
return res
# Handling points lying on branch cuts (-oo, 0] U (1, oo)
if arg0.is_negative or (1 - arg0).is_negative:
ndir = arg.dir(x, cdir if cdir else 1)
if im(ndir).is_positive:
if arg0.is_positive or (arg0 + 1).is_negative:
return -res
return res - 2*I*pi
elif not im(ndir).is_negative:
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
return res
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sech
def _eval_rewrite_as_log(self, arg, **kwargs):
return log(1/arg + sqrt(1/arg - 1) * sqrt(1/arg + 1))
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _eval_rewrite_as_acosh(self, arg, **kwargs):
return acosh(1/arg)
def _eval_rewrite_as_asinh(self, arg, **kwargs):
return sqrt(1/arg - 1)/sqrt(1 - 1/arg)*(I*asinh(I/arg, evaluate=False)
+ pi*S.Half)
def _eval_rewrite_as_atanh(self, x, **kwargs):
return (I*pi*(1 - sqrt(x)*sqrt(1/x) - I/2*sqrt(-x)/sqrt(x) - I/2*sqrt(x**2)/sqrt(-x**2))
+ sqrt(1/(x + 1))*sqrt(x + 1)*atanh(sqrt(1 - x**2)))
def _eval_rewrite_as_acsch(self, x, **kwargs):
return sqrt(1/x - 1)/sqrt(1 - 1/x)*(pi/2 - I*acsch(I*x, evaluate=False))
def _eval_is_extended_real(self):
return fuzzy_and([self.args[0].is_extended_real, self.args[0].is_nonnegative, (1 - self.args[0]).is_nonnegative])
def _eval_is_finite(self):
return fuzzy_not(self.args[0].is_zero)
| asech |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/attributes.py | {
"start": 3192,
"end": 3331
} | class ____(MutableMapping[str, str]):
def copy(self) -> PartiallyZonedDict:
return self
@dataclass(frozen=True)
| PartiallyZonedDict |
python | pytorch__pytorch | test/distributed/tensor/test_op_strategy.py | {
"start": 24903,
"end": 26013
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 2
@with_comms
def test_cache_clean(self):
mesh = self.build_device_mesh()
test_op = torch.ops.mylib.numpy_sin
x = torch.randn(2, device=self.device_type)
y = torch.randn(2, device=self.device_type)
x_dt = distribute_tensor(x, mesh, [Shard(0)])
y_dt = distribute_tensor(y, mesh, [Shard(0)])
with op_strategy_context(test_op.default, replicate_op_strategy):
self._test_op_on_dtensor(test_op, x_dt, y_dt)
with self.assertRaisesRegex(
NotImplementedError,
f"Operator {test_op.default} does not have a sharding strategy registered",
):
self._test_op_on_dtensor(test_op, x_dt, y_dt)
DistTensorReplicateStrategyRegistrationTestWithLocalTensor = (
create_local_tensor_test_class(
DistTensorReplicateStrategyRegistrationTest,
)
)
TestStrategyHashingWithLocalTensor = create_local_tensor_test_class(
TestStrategyHashing,
)
if __name__ == "__main__":
run_tests()
| TestStrategyOperation |
python | ethereum__web3.py | tests/core/middleware/test_middleware.py | {
"start": 185,
"end": 380
} | class ____(Web3Middleware):
def response_processor(self, method, response):
if method == "eth_blockNumber":
response["result"] = 1234
return response
| MockMiddleware |
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 472,
"end": 510
} | class ____:
__slots__ = []
| SecondGood |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/utils/test_waiter_with_logging.py | {
"start": 1272,
"end": 12592
} | class ____:
@mock.patch("time.sleep")
def test_wait(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response=generate_response("Pending"),
)
mock_waiter.wait.side_effect = [error, error, True]
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter.wait.call_count == 3
mock_sleep.assert_called_with(123)
@pytest.mark.asyncio
async def test_async_wait(self):
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response=generate_response("Pending"),
)
mock_waiter.wait = AsyncMock()
mock_waiter.wait.side_effect = [error, error, True]
await async_wait(
waiter=mock_waiter,
waiter_delay=0,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter.wait.call_count == 3
@pytest.mark.asyncio
async def test_async_wait_with_unknown_failure(self):
mock_waiter = mock.MagicMock()
service_exception = WaiterError(
name="test_waiter",
reason="An error occurred",
last_response={
"Error": {
"Message": "Not authorized to perform: states:DescribeExecution on resource",
"Code": "AccessDeniedException",
}
},
)
mock_waiter.wait = AsyncMock()
mock_waiter.wait.side_effect = [service_exception]
with pytest.raises(AirflowException) as exc:
await async_wait(
waiter=mock_waiter,
waiter_delay=0,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert "An error occurred" in str(exc)
assert mock_waiter.wait.call_count == 1
@mock.patch("time.sleep")
def test_wait_max_attempts_exceeded(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response=generate_response("Pending"),
)
mock_waiter.wait.side_effect = [error, error, error]
with pytest.raises(AirflowException) as exc:
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=2,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
assert "Waiter error: max attempts reached" in str(exc)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter.wait.call_count == 2
mock_sleep.assert_called_with(123)
@mock.patch("time.sleep")
def test_wait_with_failure(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response=generate_response("Pending"),
)
failure_error = WaiterError(
name="test_waiter",
reason="terminal failure in waiter",
last_response=generate_response("Failure"),
)
mock_waiter.wait.side_effect = [error, error, error, failure_error]
with pytest.raises(AirflowException) as exc:
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=10,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
assert "test failure message" in str(exc)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter.wait.call_count == 4
@mock.patch("time.sleep")
def test_wait_with_unknown_failure(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
service_exception = WaiterError(
name="test_waiter",
reason="An error occurred",
last_response={
"Error": {
"Message": "Not authorized to perform: states:DescribeExecution on resource",
"Code": "AccessDeniedException",
}
},
)
mock_waiter.wait.side_effect = [service_exception]
with pytest.raises(AirflowException) as exc:
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=10,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
assert "An error occurred" in str(exc)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter.wait.call_count == 1
@mock.patch("time.sleep")
def test_wait_with_list_response(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response={
"Clusters": [
{
"Status": "Pending",
},
{
"Status": "Pending",
},
]
},
)
mock_waiter.wait.side_effect = [error, error, True]
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Clusters[0].Status"],
)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
mock_waiter.wait.call_count == 3
mock_sleep.assert_called_with(123)
@mock.patch("time.sleep")
def test_wait_with_incorrect_args(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response={
"Clusters": [
{
"Status": "Pending",
},
{
"Status": "Pending",
},
]
},
)
mock_waiter.wait.side_effect = [error, error, True]
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Clusters[0].State"], # this does not exist in the response
)
mock_waiter.wait.assert_called_with(
**{"test_arg": "test_value"},
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter.wait.call_count == 3
mock_sleep.assert_called_with(123)
@mock.patch("time.sleep")
def test_wait_with_multiple_args(self, mock_sleep):
mock_sleep.return_value = True
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response={
"Clusters": [
{
"Status": "Pending",
"StatusDetails": "test_details",
"ClusterName": "test_name",
},
]
},
)
mock_waiter.wait.side_effect = [error, error, True]
wait(
waiter=mock_waiter,
waiter_delay=123,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Clusters[0].Status", "Clusters[0].StatusDetails", "Clusters[0].ClusterName"],
)
assert mock_waiter.wait.call_count == 3
mock_sleep.assert_called_with(123)
@mock.patch.object(_LazyStatusFormatter, "__str__")
def test_status_formatting_not_done_if_higher_log_level(self, status_format_mock: mock.MagicMock):
status_format_mock.return_value = "test_status"
mock_waiter = mock.MagicMock()
error = WaiterError(
name="test_waiter",
reason="test_reason",
last_response=generate_response("Pending"),
)
logger = logging.getLogger(wait.__module__)
level = logger.getEffectiveLevel()
logger.setLevel(logging.WARNING)
try:
mock_waiter.wait.side_effect = [error, error, True]
wait(
waiter=mock_waiter,
waiter_delay=0,
waiter_max_attempts=456,
args={"test_arg": "test_value"},
failure_message="test failure message",
status_message="test status message",
status_args=["Status.State"],
)
finally:
logger.setLevel(level)
status_format_mock.assert_not_called()
| TestWaiter |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pytest_style/PT009.py | {
"start": 18,
"end": 2995
} | class ____(unittest.TestCase):
def test_xxx(self):
assert 1 == 1 # OK no parameters
def test_assert_true(self):
expr = 1
msg = "Must be True"
self.assertTrue(expr) # Error
self.assertTrue(expr=expr) # Error
self.assertTrue(expr, msg) # Error
self.assertTrue(expr=expr, msg=msg) # Error
self.assertTrue(msg=msg, expr=expr) # Error
self.assertTrue(*(expr, msg)) # Error, unfixable
self.assertTrue(**{"expr": expr, "msg": msg}) # Error, unfixable
self.assertTrue(msg=msg, expr=expr, unexpected_arg=False) # Error, unfixable
self.assertTrue(msg=msg) # Error, unfixable
(
self.assertIsNotNone(value) # Error, unfixable
if expect_condition
else self.assertIsNone(value) # Error, unfixable
)
return self.assertEqual(True, False) # Error, unfixable
def test_assert_false(self):
self.assertFalse(True) # Error
def test_assert_equal(self):
self.assertEqual(1, 2) # Error
def test_assert_not_equal(self):
self.assertNotEqual(1, 1) # Error
def test_assert_greater(self):
self.assertGreater(1, 2) # Error
def test_assert_greater_equal(self):
self.assertGreaterEqual(1, 2) # Error
def test_assert_less(self):
self.assertLess(2, 1) # Error
def test_assert_less_equal(self):
self.assertLessEqual(1, 2) # Error
def test_assert_in(self):
self.assertIn(1, [2, 3]) # Error
def test_assert_not_in(self):
self.assertNotIn(2, [2, 3]) # Error
def test_assert_is_none(self):
self.assertIsNone(0) # Error
def test_assert_is_not_none(self):
self.assertIsNotNone(0) # Error
def test_assert_is(self):
self.assertIs([], []) # Error
def test_assert_is_not(self):
self.assertIsNot(1, 1) # Error
def test_assert_is_instance(self):
self.assertIsInstance(1, str) # Error
def test_assert_is_not_instance(self):
self.assertNotIsInstance(1, int) # Error
def test_assert_regex(self):
self.assertRegex("abc", r"def") # Error
def test_assert_not_regex(self):
self.assertNotRegex("abc", r"abc") # Error
def test_assert_regexp_matches(self):
self.assertRegexpMatches("abc", r"def") # Error
def test_assert_not_regexp_matches(self):
self.assertNotRegex("abc", r"abc") # Error
def test_fail_if(self):
self.failIf("abc") # Error
def test_fail_unless(self):
self.failUnless("abc") # Error
def test_fail_unless_equal(self):
self.failUnlessEqual(1, 2) # Error
def test_fail_if_equal(self):
self.failIfEqual(1, 2) # Error
# Regression test for: https://github.com/astral-sh/ruff/issues/7455#issuecomment-1722459517
(self.assertTrue(
"piAx_piAy_beta[r][x][y] = {17}".format(
self.model.piAx_piAy_beta[r][x][y])))
| Test |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 28057,
"end": 28507
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is refusing to process a request
because the request entity is larger than the server is willing or
able to process.
code: 413, title: Request Entity Too Large
"""
code = 413
title = 'Request Entity Too Large'
explanation = 'The body of your request was too large for this server.'
| HTTPRequestEntityTooLarge |
python | pytorch__pytorch | torch/optim/lr_scheduler.py | {
"start": 22331,
"end": 25654
} | class ____(LRScheduler):
"""Decays the learning rate of each parameter group by gamma every step_size epochs.
Notice that such decay can happen simultaneously with other changes to the learning rate
from outside this scheduler. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
step_size (int): Period of learning rate decay.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # xdoctest: +SKIP
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 60
>>> # lr = 0.0005 if 60 <= epoch < 90
>>> # ...
>>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
.. image:: ../scripts/lr_scheduler_images/StepLR.png
"""
def __init__(
self,
optimizer: Optimizer,
step_size: int,
gamma: float = 0.1,
last_epoch: int = -1,
) -> None: # noqa: D107
self.step_size = step_size
self.gamma = gamma
super().__init__(optimizer, last_epoch)
@override
def get_lr(self) -> list[float | Tensor]:
r"""Compute the next learning rate for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups`.
If the current epoch is a non-zero multiple of :attr:`step_size`, we
scale the current ``group["lr"]``\s in the optimizer's
:attr:`~torch.optim.Optimizer.param_groups` by :attr:`gamma`.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
.. note::
If you're trying to inspect the most recent learning rate, use
:meth:`get_last_lr()` instead.
.. note::
The returned :class:`~torch.Tensor`\s are copies, and never alias
the optimizer's ``group["lr"]``\s.
"""
_warn_get_lr_called_within_step(self)
if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0):
return _param_groups_val_list(self.optimizer, "lr")
return [group["lr"] * self.gamma for group in self.optimizer.param_groups]
def _get_closed_form_lr(self) -> list[float | Tensor]:
r"""Compute learning rates for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups` at :attr:`last_epoch` using
a closed-form formula.
Uses :attr:`base_lrs` to compute learning rates. This method is called
when an epoch is passed to :meth:`step`.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
"""
return [
base_lr * self.gamma ** (self.last_epoch // self.step_size)
for base_lr in self.base_lrs
]
| StepLR |
python | pandas-dev__pandas | pandas/tests/indexes/datetimelike_/test_equals.py | {
"start": 2895,
"end": 4976
} | class ____(EqualsTests):
@pytest.fixture
def index(self):
"""Fixture for creating a DatetimeIndex for use in equality tests."""
return date_range("2013-01-01", periods=5)
def test_equals2(self):
# GH#13107
idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific")
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
# check that we do not raise when comparing with OutOfBounds objects
oob = Index([datetime(2500, 1, 1)] * 3, dtype=object)
assert not idx.equals(oob)
assert not idx2.equals(oob)
assert not idx3.equals(oob)
# check that we do not raise when comparing with OutOfBounds dt64
oob2 = oob.map(np.datetime64)
assert not idx.equals(oob2)
assert not idx2.equals(oob2)
assert not idx3.equals(oob2)
@pytest.mark.parametrize("freq", ["B", "C"])
def test_not_equals_bday(self, freq):
rng = date_range("2009-01-01", "2010-01-01", freq=freq)
assert not rng.equals(list(rng))
| TestDatetimeIndexEquals |
python | run-llama__llama_index | llama-index-core/tests/indices/response/test_tree_summarize.py | {
"start": 1744,
"end": 4284
} | class ____(BaseModel):
hello: str
def mock_return_class(*args: Any, **kwargs: Any) -> TestModel:
return TestModel(hello="Test Chunk 5")
@patch.object(MockLLM, "structured_predict", mock_return_class)
def test_tree_summarize_output_cls(mock_prompt_helper) -> None:
mock_summary_prompt_tmpl = "{context_str}{query_str}"
mock_summary_prompt = PromptTemplate(
mock_summary_prompt_tmpl, prompt_type=PromptType.SUMMARY
)
query_str = "What is?"
texts = [
'{"hello":"Test Chunk 1"}',
'{"hello":"Test Chunk 2"}',
'{"hello":"Test Chunk 3"}',
'{"hello":"Test Chunk 4"}',
]
response_dict = {"hello": "Test Chunk 5"}
# test sync
tree_summarize = TreeSummarize(
prompt_helper=mock_prompt_helper,
summary_template=mock_summary_prompt,
output_cls=TestModel,
)
full_response = "\n".join(texts)
response = tree_summarize.get_response(text_chunks=texts, query_str=query_str)
assert isinstance(response, TestModel)
assert response.model_dump() == response_dict
def test_tree_summarize_use_async(mock_prompt_helper) -> None:
mock_summary_prompt_tmpl = "{context_str}{query_str}"
mock_summary_prompt = PromptTemplate(
mock_summary_prompt_tmpl, prompt_type=PromptType.SUMMARY
)
query_str = "What is?"
texts = [
"Text chunk 1",
"Text chunk 2",
"Text chunk 3",
"Text chunk 4",
]
# test async
tree_summarize = TreeSummarize(
prompt_helper=mock_prompt_helper,
summary_template=mock_summary_prompt,
use_async=True,
)
response = tree_summarize.get_response(text_chunks=texts, query_str=query_str)
assert str(response) == "Text chunk 1\nText chunk 2\nText chunk 3\nText chunk 4"
@pytest.mark.asyncio
async def test_tree_summarize_async(mock_prompt_helper) -> None:
mock_summary_prompt_tmpl = "{context_str}{query_str}"
mock_summary_prompt = PromptTemplate(
mock_summary_prompt_tmpl, prompt_type=PromptType.SUMMARY
)
query_str = "What is?"
texts = [
"Text chunk 1",
"Text chunk 2",
"Text chunk 3",
"Text chunk 4",
]
# test async
tree_summarize = TreeSummarize(
prompt_helper=mock_prompt_helper,
summary_template=mock_summary_prompt,
)
response = await tree_summarize.aget_response(
text_chunks=texts, query_str=query_str
)
assert str(response) == "Text chunk 1\nText chunk 2\nText chunk 3\nText chunk 4"
| TestModel |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v3_cpu_ops_test.py | {
"start": 7815,
"end": 22920
} | class ____(parameterized.TestCase, test.TestCase):
@parameterized.parameters(
*list(
itertools.product(
[16, 32],
[1024, 2048],
["sum", "mean", "sqrtn"],
[0, 56, 1600000],
[0, 12, 20],
)
)
)
def test_convert_to_list_of_sparse_core_coo_tensor(
self, sample_count, token_count, combiner, col_offset, col_shift
):
sparse_feature = sparse_ops.sparse_reorder(
sparse_tensor.SparseTensor(
indices=[
[i % sample_count, i]
for i in np.random.randint(low=0, high=1024, size=token_count)
],
values=np.random.randint(low=0, high=1024, size=token_count),
dense_shape=[sample_count, 1024],
)
)
num_sc_per_chip = 4
num_chip = 128
row_offset = sample_count
num_sc_shards = num_sc_per_chip * num_chip
stacked_table_sample_count = sample_count * 4
num_sc_shards_bit = int(math.log2(num_sc_shards))
num_sc_shards_bit_mod = (1 << num_sc_shards_bit) - 1
num_sc_shards_bit_mod_inv = bitwise_ops.invert(num_sc_shards_bit_mod)
row_ids, col_ids, gains = convert_input_to_coo_tensor(
indices_or_row_splits=sparse_feature.indices,
values=sparse_feature.values,
weight=np.ones(shape=token_count),
sample_count=sample_count,
combiner=combiner,
)
golden_row_ids = (
row_ids % (sample_count // num_sc_per_chip)
+ int(row_offset // num_sc_per_chip)
+ int(stacked_table_sample_count // num_sc_per_chip)
* (row_ids // (sample_count // num_sc_per_chip))
)
golden_col_ids = (
bitwise_ops.bitwise_and(col_ids + col_shift, num_sc_shards_bit_mod)
+ bitwise_ops.bitwise_and(col_ids, num_sc_shards_bit_mod_inv)
+ col_offset
)
row_ids_list, col_ids_list, gains_list = (
xla_ops.convert_to_list_of_sparse_core_coo_tensors(
indices_or_row_splits=math_ops.cast(
sparse_feature.indices, dtype=dtypes.int32
),
values=math_ops.cast(sparse_feature.values, dtype=dtypes.int32),
weights=1.0,
sample_count=sample_count,
combiner=combiner,
num_sc_per_chip=4,
row_offset=row_offset,
col_offset=col_offset,
col_shift=col_shift,
num_sc_shards=num_sc_shards,
stacked_table_sample_count=stacked_table_sample_count,
)
)
self.assertAllClose(golden_row_ids, array_ops.concat(row_ids_list, axis=0))
self.assertAllClose(golden_col_ids, array_ops.concat(col_ids_list, axis=0))
self.assertAllClose(gains, array_ops.concat(gains_list, axis=0))
def test_convert_to_list_of_sparse_core_coo_tensors(self):
sample_count = 16
token_count = 1024
combiner = "sum"
sparse_feature = sparse_ops.sparse_reorder(
sparse_tensor.SparseTensor(
indices=[[i % sample_count, i] for i in np.arange(token_count)],
values=np.arange(token_count),
dense_shape=[sample_count, 1024],
)
)
row_ids_list, col_ids_list, gains_list = (
xla_ops.convert_to_list_of_sparse_core_coo_tensors(
indices_or_row_splits=math_ops.cast(
sparse_feature.indices, dtype=dtypes.int32
),
values=math_ops.cast(sparse_feature.values, dtype=dtypes.int32),
weights=1.0,
sample_count=sample_count,
combiner=combiner,
num_sc_per_chip=4,
row_offset=0,
col_offset=0,
col_shift=0,
num_sc_shards=16,
stacked_table_sample_count=sample_count,
)
)
sorted_row_ids_list = []
sorted_col_ids_list = []
sorted_gains_list = []
id_counts_list = []
for i in range(4):
(
sorted_row_ids,
sorted_col_ids,
sorted_gains,
id_counts,
) = xla_ops.sort_list_of_sparse_core_coo_tensors(
row_ids_list=[row_ids_list[i]],
col_ids_list=[col_ids_list[i]],
gains_list=[gains_list[i]],
sample_count_list=[sample_count // 4],
col_offset_list=[0],
num_replica=4,
table_vocab_size=16384,
feature_width=16,
num_sc_per_chip=4,
max_ids_per_sparse_core=256,
max_unique_ids_per_sparse_core=256,
table_name="table",
)
sorted_row_ids_list.append(sorted_row_ids)
sorted_col_ids_list.append(sorted_col_ids)
sorted_gains_list.append(sorted_gains)
id_counts_list.append(id_counts)
(
row_pointers,
sorted_sample_ids,
sorted_token_ids,
sorted_gains,
row_pointers_unpadded_size,
ids_unpadded_size,
num_minibatches_per_sc,
) = xla_ops.convert_to_sparse_core_csr_wrapped_coo_tensor(
sorted_row_ids_list=sorted_row_ids_list,
sorted_col_ids_list=sorted_col_ids_list,
sorted_gains_list=sorted_gains_list,
id_counts_list=id_counts_list,
splits=constant_op.constant(0, dtype=dtypes.int64),
sample_count_per_sc=sample_count // 4,
max_minibatches_per_sc=4,
max_ids_per_chip_per_sample=64,
table_vocab_size=16384,
feature_width=16,
num_replica=4,
allow_id_dropping=False,
table_name="table",
)
(
golden_row_pointers,
golden_sorted_sample_ids,
golden_sorted_token_ids,
golden_sorted_gains,
golden_row_pointers_unpadded_size,
golden_ids_unpadded_size,
golden_num_minibatches_per_sc,
) = _convert_coo_tensor_to_csr_with_physical_replica(
row_ids=array_ops.concat(row_ids_list, axis=0),
col_ids=array_ops.concat(col_ids_list, axis=0),
gains=array_ops.concat(gains_list, axis=0),
splits=constant_op.constant(0, dtype=dtypes.int64),
sample_count=sample_count,
num_replica=4,
max_minibatches_per_sc=4,
max_ids_per_chip_per_sample=64,
table_vocab_size=16384,
)
self.assertAllClose(
golden_row_pointers[:golden_row_pointers_unpadded_size],
row_pointers[:row_pointers_unpadded_size],
)
self.assertAllClose(
golden_sorted_sample_ids[:golden_ids_unpadded_size],
sorted_sample_ids[:ids_unpadded_size],
)
self.assertAllClose(
golden_sorted_token_ids[:golden_ids_unpadded_size],
sorted_token_ids[:ids_unpadded_size],
)
self.assertAllClose(
golden_sorted_gains[:golden_ids_unpadded_size],
sorted_gains[:ids_unpadded_size],
)
self.assertEqual(golden_num_minibatches_per_sc, num_minibatches_per_sc)
def test_get_stats_from_list_of_sparse_core_coo_tensors(self):
sample_count = 16
token_count = 1024
combiner = "sum"
sparse_feature = sparse_ops.sparse_reorder(
sparse_tensor.SparseTensor(
indices=[
[i % sample_count, i]
for i in np.random.randint(low=0, high=1024, size=token_count)
],
values=np.random.randint(low=0, high=1024, size=token_count),
dense_shape=[sample_count, 1024],
)
)
max_ids_golden = 0
max_unique_ids_golden = 0
for i in range(4):
sparse_feature_slice = sparse_ops.sparse_slice(
sparse_feature,
[i * sample_count // 4, 0],
[sample_count // 4, 1024],
)
max_ids_per_sparse_core, max_uniques_per_sparse_core = (
_compute_sparse_core_stats(
sparse_feature_slice.indices[:, 0],
sparse_feature_slice.values,
num_sc_shards=16,
)
)
max_ids_golden = max(max_ids_golden, max_ids_per_sparse_core)
max_unique_ids_golden = max(
max_unique_ids_golden, max_uniques_per_sparse_core
)
row_ids_list, col_ids_list, gains_list = (
xla_ops.convert_to_list_of_sparse_core_coo_tensors(
indices_or_row_splits=math_ops.cast(
sparse_feature.indices, dtype=dtypes.int32
),
values=math_ops.cast(sparse_feature.values, dtype=dtypes.int32),
weights=1.0,
sample_count=sample_count,
combiner=combiner,
num_sc_per_chip=4,
row_offset=0,
col_offset=0,
col_shift=0,
num_sc_shards=16,
stacked_table_sample_count=sample_count,
)
)
max_ids = 0
max_uniques = 0
for i in range(4):
max_ids_per_sparse_core, max_unique_ids_per_sparse_core = (
xla_ops.get_stats_from_list_of_sparse_core_coo_tensors(
row_ids_list=[row_ids_list[i]],
col_ids_list=[col_ids_list[i]],
gains_list=[gains_list[i]],
sample_count_list=[sample_count // 4],
col_offset_list=[0],
num_replica=4,
table_vocab_size=16384,
feature_width=16,
num_sc_per_chip=4,
table_name="table",
)
)
max_ids = max(max_ids, max_ids_per_sparse_core)
max_uniques = max(max_uniques, max_unique_ids_per_sparse_core)
self.assertEqual(max_ids, max_ids_golden)
self.assertEqual(max_uniques, max_unique_ids_golden)
def test_sort_list_of_sparse_core_coo_tensors(self):
sample_count = 16
token_count = 1024
combiner = "sum"
num_chips = 4
sparse_feature = sparse_ops.sparse_reorder(
sparse_tensor.SparseTensor(
indices=[[i % sample_count, i] for i in np.arange(token_count)],
values=np.arange(token_count),
dense_shape=[sample_count, 1024],
)
)
row_ids_list, col_ids_list, gains_list = (
xla_ops.convert_to_list_of_sparse_core_coo_tensors(
indices_or_row_splits=math_ops.cast(
sparse_feature.indices, dtype=dtypes.int32
),
values=math_ops.cast(sparse_feature.values, dtype=dtypes.int32),
weights=1.0,
sample_count=sample_count,
combiner=combiner,
num_sc_per_chip=4,
row_offset=0,
col_offset=0,
col_shift=0,
num_sc_shards=num_chips * 4,
stacked_table_sample_count=sample_count,
)
)
for i in range(4):
(
sorted_row_ids,
sorted_col_ids,
sorted_gains,
_,
) = xla_ops.sort_list_of_sparse_core_coo_tensors(
row_ids_list=[row_ids_list[i]],
col_ids_list=[col_ids_list[i]],
gains_list=[gains_list[i]],
sample_count_list=[sample_count // 4],
col_offset_list=[0],
num_replica=num_chips,
table_vocab_size=16384,
feature_width=16,
num_sc_per_chip=4,
max_ids_per_sparse_core=256,
max_unique_ids_per_sparse_core=256,
table_name="table",
)
embedding_lookup_inputs = []
for row_id, col_id, gain in zip(
row_ids_list[i], col_ids_list[i], gains_list[i]
):
embedding_lookup_inputs.append((col_id % 16, col_id, row_id, gain))
# sort based on replica id first, then col_id.
embedding_lookup_inputs.sort()
self.assertAllClose(
sorted_row_ids,
[inp[2] % (sample_count // 4) for inp in embedding_lookup_inputs],
)
self.assertAllClose(
sorted_col_ids,
[inp[1] // (num_chips * 4) for inp in embedding_lookup_inputs],
)
self.assertAllClose(
sorted_gains, [inp[3] for inp in embedding_lookup_inputs]
)
def test_id_dropping_with_convert_to_list_of_sparse_core_coo_tensors(self):
sample_count = 16
token_count = 1024
combiner = "sum"
sparse_feature = sparse_ops.sparse_reorder(
sparse_tensor.SparseTensor(
indices=[[i % sample_count, i] for i in np.arange(token_count)],
values=np.arange(token_count),
dense_shape=[sample_count, 1024],
)
)
row_ids_list, col_ids_list, gains_list = (
xla_ops.convert_to_list_of_sparse_core_coo_tensors(
indices_or_row_splits=math_ops.cast(
sparse_feature.indices, dtype=dtypes.int32
),
values=math_ops.cast(sparse_feature.values, dtype=dtypes.int32),
weights=1.0,
sample_count=sample_count,
combiner=combiner,
num_sc_per_chip=4,
row_offset=0,
col_offset=0,
col_shift=0,
num_sc_shards=16,
stacked_table_sample_count=sample_count,
)
)
sorted_row_ids_list = []
sorted_col_ids_list = []
sorted_gains_list = []
id_counts_list = []
for i in range(4):
(
sorted_row_ids,
sorted_col_ids,
sorted_gains,
id_counts,
) = xla_ops.sort_list_of_sparse_core_coo_tensors(
row_ids_list=[row_ids_list[i]],
col_ids_list=[col_ids_list[i]],
gains_list=[gains_list[i]],
sample_count_list=[sample_count // 4],
col_offset_list=[0],
num_replica=4,
table_vocab_size=16384,
feature_width=16,
num_sc_per_chip=4,
max_ids_per_sparse_core=256,
max_unique_ids_per_sparse_core=256,
table_name="table",
)
sorted_row_ids_list.append(sorted_row_ids)
sorted_col_ids_list.append(sorted_col_ids)
sorted_gains_list.append(sorted_gains)
id_counts_list.append(id_counts)
# If not allow id dropping, the op will fail with very small
# max_ids_per_chip_per_sample.
with self.assertRaises(Exception):
xla_ops.convert_to_sparse_core_csr_wrapped_coo_tensor(
sorted_row_ids_list=sorted_row_ids_list,
sorted_col_ids_list=sorted_col_ids_list,
sorted_gains_list=sorted_gains_list,
id_counts_list=id_counts_list,
splits=constant_op.constant(0, dtype=dtypes.int64),
sample_count_per_sc=sample_count // 4,
max_minibatches_per_sc=4,
max_ids_per_chip_per_sample=8,
table_vocab_size=16384,
feature_width=16,
num_replica=4,
allow_id_dropping=False,
table_name="table",
)
# Allow id dropping, the op will succeed,
xla_ops.convert_to_sparse_core_csr_wrapped_coo_tensor(
sorted_row_ids_list=sorted_row_ids_list,
sorted_col_ids_list=sorted_col_ids_list,
sorted_gains_list=sorted_gains_list,
id_counts_list=id_counts_list,
splits=constant_op.constant(0, dtype=dtypes.int64),
sample_count_per_sc=sample_count // 4,
max_minibatches_per_sc=4,
max_ids_per_chip_per_sample=8,
table_vocab_size=16384,
feature_width=16,
num_replica=4,
allow_id_dropping=True,
table_name="table",
)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
test.main()
| TpuEmbeddingV3CPUOpsTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 252143,
"end": 252516
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "project_card")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
project_card = sgqlc.types.Field("ProjectCard", graphql_name="projectCard")
| ConvertProjectCardNoteToIssuePayload |
python | getsentry__sentry | tests/sentry/search/events/test_filter.py | {
"start": 6912,
"end": 14144
} | class ____(BaseSemverConverterTest):
key = SEMVER_ALIAS
def converter(self, *args, **kwargs):
return _semver_filter_converter(*args, **kwargs)
def test_invalid_params(self) -> None:
key = SEMVER_ALIAS
filter = SearchFilter(SearchKey(key), ">", SearchValue("1.2.3"))
with pytest.raises(ValueError, match="organization_id is a required param"):
_semver_filter_converter(filter, key, None)
with pytest.raises(ValueError, match="organization_id is a required param"):
_semver_filter_converter(filter, key, {"something": 1}) # type: ignore[arg-type] # intentionally bad data
def test_invalid_query(self) -> None:
key = SEMVER_ALIAS
filter = SearchFilter(SearchKey(key), ">", SearchValue("1.2.hi"))
with pytest.raises(
InvalidSearchQuery,
match=INVALID_SEMVER_MESSAGE,
):
_semver_filter_converter(filter, key, {"organization_id": self.organization.id})
def test_empty(self) -> None:
self.run_test(">", "1.2.3", "IN", [SEMVER_EMPTY_RELEASE])
def test(self) -> None:
release = self.create_release(version="test@1.2.3")
release_2 = self.create_release(version="test@1.2.4")
self.run_test(">", "1.2.3", "IN", [release_2.version])
self.run_test(">=", "1.2.4", "IN", [release_2.version])
self.run_test("<", "1.2.4", "IN", [release.version])
self.run_test("<=", "1.2.3", "IN", [release.version])
self.run_test("=", "1.2.4", "IN", [release_2.version])
def test_invert_query(self) -> None:
# Tests that flipping the query works and uses a NOT IN. Test all operators to
# make sure the inversion works correctly.
release = self.create_release(version="test@1.2.3")
self.create_release(version="test@1.2.4")
release_2 = self.create_release(version="test@1.2.5")
with patch("sentry.search.events.filter.MAX_SEARCH_RELEASES", 2):
self.run_test(">", "1.2.3", "NOT IN", [release.version])
self.run_test(">=", "1.2.4", "NOT IN", [release.version])
self.run_test("<", "1.2.5", "NOT IN", [release_2.version])
self.run_test("<=", "1.2.4", "NOT IN", [release_2.version])
self.run_test("!=", "1.2.3", "NOT IN", [release.version])
def test_invert_fails(self) -> None:
# Tests that when we invert and still receive too many records that we return
# as many records we can using IN that are as close to the specified filter as
# possible.
self.create_release(version="test@1.2.1")
release_1 = self.create_release(version="test@1.2.2")
release_2 = self.create_release(version="test@1.2.3")
release_3 = self.create_release(version="test@1.2.4")
self.create_release(version="test@1.2.5")
with patch("sentry.search.events.filter.MAX_SEARCH_RELEASES", 2):
self.run_test(">", "1.2.2", "IN", [release_2.version, release_3.version])
self.run_test(">=", "1.2.3", "IN", [release_2.version, release_3.version])
self.run_test("<", "1.2.4", "IN", [release_2.version, release_1.version])
self.run_test("<=", "1.2.3", "IN", [release_2.version, release_1.version])
def test_prerelease(self) -> None:
# Prerelease has weird sorting rules, where an empty string is higher priority
# than a non-empty string. Make sure this sorting works
release = self.create_release(version="test@1.2.3-alpha")
release_1 = self.create_release(version="test@1.2.3-beta")
release_2 = self.create_release(version="test@1.2.3")
release_3 = self.create_release(version="test@1.2.4-alpha")
release_4 = self.create_release(version="test@1.2.4")
self.run_test(
">=", "1.2.3", "IN", [release_2.version, release_3.version, release_4.version]
)
self.run_test(
">=",
"1.2.3-beta",
"IN",
[release_1.version, release_2.version, release_3.version, release_4.version],
)
self.run_test("<", "1.2.3", "IN", [release_1.version, release.version])
def test_granularity(self) -> None:
self.create_release(version="test@1.0.0.0")
release_2 = self.create_release(version="test@1.2.0.0")
release_3 = self.create_release(version="test@1.2.3.0")
release_4 = self.create_release(version="test@1.2.3.4")
release_5 = self.create_release(version="test@2.0.0.0")
self.run_test(
">",
"1",
"IN",
[release_2.version, release_3.version, release_4.version, release_5.version],
)
self.run_test(">", "1.2", "IN", [release_3.version, release_4.version, release_5.version])
self.run_test(">", "1.2.3", "IN", [release_4.version, release_5.version])
self.run_test(">", "1.2.3.4", "IN", [release_5.version])
self.run_test(">", "2", "IN", [SEMVER_EMPTY_RELEASE])
def test_wildcard(self) -> None:
release_1 = self.create_release(version="test@1.0.0.0")
release_2 = self.create_release(version="test@1.2.0.0")
release_3 = self.create_release(version="test@1.2.3.0")
release_4 = self.create_release(version="test@1.2.3.4")
release_5 = self.create_release(version="test@2.0.0.0")
self.run_test(
"=",
"1.X",
"IN",
[release_1.version, release_2.version, release_3.version, release_4.version],
)
self.run_test("=", "1.2.*", "IN", [release_2.version, release_3.version, release_4.version])
self.run_test("=", "1.2.3.*", "IN", [release_3.version, release_4.version])
self.run_test("=", "1.2.3.4", "IN", [release_4.version])
self.run_test("=", "2.*", "IN", [release_5.version])
def test_multi_package(self) -> None:
release_1 = self.create_release(version="test@1.0.0.0")
release_2 = self.create_release(version="test@1.2.0.0")
release_3 = self.create_release(version="test_2@1.2.3.0")
self.run_test("=", "test@1.*", "IN", [release_1.version, release_2.version])
self.run_test(">=", "test@1.0", "IN", [release_1.version, release_2.version])
self.run_test(">", "test_2@1.0", "IN", [release_3.version])
def test_projects(self) -> None:
project_2 = self.create_project()
release_1 = self.create_release(version="test@1.0.0.0")
release_2 = self.create_release(version="test@1.2.0.0", project=project_2)
release_3 = self.create_release(version="test@1.2.3.0")
self.run_test(
">=",
"test@1.0",
"IN",
[release_1.version, release_2.version, release_3.version],
project_id=[self.project.id, project_2.id],
)
self.run_test(
">=",
"test@1.0",
"IN",
[release_1.version, release_3.version],
project_id=[self.project.id],
)
self.run_test(
">=",
"test@1.0",
"IN",
[release_2.version],
project_id=[project_2.id],
)
| SemverFilterConverterTest |
python | django__django | tests/auth_tests/test_views.py | {
"start": 38472,
"end": 40238
} | class ____(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url):
response = self.client.get("/login_required/")
self.assertRedirects(response, url, fetch_redirect_response=False)
@override_settings(LOGIN_URL="/login/")
def test_standard_login_url(self):
self.assertLoginURLEquals("/login/?next=/login_required/")
@override_settings(LOGIN_URL="login")
def test_named_login_url(self):
self.assertLoginURLEquals("/login/?next=/login_required/")
@override_settings(LOGIN_URL="http://remote.example.com/login")
def test_remote_login_url(self):
quoted_next = quote("http://testserver/login_required/")
expected = "http://remote.example.com/login?next=%s" % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL="https:///login/")
def test_https_login_url(self):
quoted_next = quote("http://testserver/login_required/")
expected = "https:///login/?next=%s" % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL="/login/?pretty=1")
def test_login_url_with_querystring(self):
self.assertLoginURLEquals("/login/?pretty=1&next=/login_required/")
@override_settings(LOGIN_URL="http://remote.example.com/login/?next=/default/")
def test_remote_login_url_with_next_querystring(self):
quoted_next = quote("http://testserver/login_required/")
expected = "http://remote.example.com/login/?next=%s" % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy("login"))
def test_lazy_login_url(self):
self.assertLoginURLEquals("/login/?next=/login_required/")
| LoginURLSettings |
python | pyca__cryptography | src/cryptography/hazmat/_oid.py | {
"start": 362,
"end": 2083
} | class ____:
SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier("2.5.29.9")
SUBJECT_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.14")
KEY_USAGE = ObjectIdentifier("2.5.29.15")
PRIVATE_KEY_USAGE_PERIOD = ObjectIdentifier("2.5.29.16")
SUBJECT_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.17")
ISSUER_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.18")
BASIC_CONSTRAINTS = ObjectIdentifier("2.5.29.19")
NAME_CONSTRAINTS = ObjectIdentifier("2.5.29.30")
CRL_DISTRIBUTION_POINTS = ObjectIdentifier("2.5.29.31")
CERTIFICATE_POLICIES = ObjectIdentifier("2.5.29.32")
POLICY_MAPPINGS = ObjectIdentifier("2.5.29.33")
AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.35")
POLICY_CONSTRAINTS = ObjectIdentifier("2.5.29.36")
EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37")
FRESHEST_CRL = ObjectIdentifier("2.5.29.46")
INHIBIT_ANY_POLICY = ObjectIdentifier("2.5.29.54")
ISSUING_DISTRIBUTION_POINT = ObjectIdentifier("2.5.29.28")
AUTHORITY_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.1")
SUBJECT_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.11")
OCSP_NO_CHECK = ObjectIdentifier("1.3.6.1.5.5.7.48.1.5")
TLS_FEATURE = ObjectIdentifier("1.3.6.1.5.5.7.1.24")
CRL_NUMBER = ObjectIdentifier("2.5.29.20")
DELTA_CRL_INDICATOR = ObjectIdentifier("2.5.29.27")
PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS = ObjectIdentifier(
"1.3.6.1.4.1.11129.2.4.2"
)
PRECERT_POISON = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3")
SIGNED_CERTIFICATE_TIMESTAMPS = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.5")
MS_CERTIFICATE_TEMPLATE = ObjectIdentifier("1.3.6.1.4.1.311.21.7")
ADMISSIONS = ObjectIdentifier("1.3.36.8.3.3")
| ExtensionOID |
python | huggingface__transformers | src/transformers/models/opt/modeling_opt.py | {
"start": 28718,
"end": 31108
} | class ____(OPTPreTrainedModel):
def __init__(self, config: OPTConfig):
super().__init__(config)
self.decoder = OPTDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
position_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return BaseModelOutputWithPast(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
)
| OPTModel |
python | realpython__materials | python-callable-instances/serializing.py | {
"start": 123,
"end": 208
} | class ____:
def __call__(self, data):
return yaml.dump(data)
| YamlSerializer |
python | wandb__wandb | wandb/sdk/data_types/base_types/wb_value.py | {
"start": 2634,
"end": 2856
} | class ____:
artifact: "Artifact"
name: Optional[str]
def __init__(self, artifact: "Artifact", name: Optional[str] = None) -> None:
self.artifact = artifact
self.name = name
| _WBValueArtifactSource |
python | sympy__sympy | sympy/functions/elementary/trigonometric.py | {
"start": 7613,
"end": 18450
} | class ____(TrigonometricFunction):
r"""
The sine function.
Returns the sine of x (measured in radians).
Explanation
===========
This function will evaluate automatically in the
case $x/\pi$ is some rational number [4]_. For example,
if $x$ is a multiple of $\pi$, $\pi/2$, $\pi/3$, $\pi/4$, and $\pi/6$.
Examples
========
>>> from sympy import sin, pi
>>> from sympy.abc import x
>>> sin(x**2).diff(x)
2*x*cos(x**2)
>>> sin(1).diff(x)
0
>>> sin(pi)
0
>>> sin(pi/2)
1
>>> sin(pi/6)
1/2
>>> sin(pi/12)
-sqrt(2)/4 + sqrt(6)/4
See Also
========
sympy.functions.elementary.trigonometric.csc
sympy.functions.elementary.trigonometric.cos
sympy.functions.elementary.trigonometric.sec
sympy.functions.elementary.trigonometric.tan
sympy.functions.elementary.trigonometric.cot
sympy.functions.elementary.trigonometric.asin
sympy.functions.elementary.trigonometric.acsc
sympy.functions.elementary.trigonometric.acos
sympy.functions.elementary.trigonometric.asec
sympy.functions.elementary.trigonometric.atan
sympy.functions.elementary.trigonometric.acot
sympy.functions.elementary.trigonometric.atan2
References
==========
.. [1] https://en.wikipedia.org/wiki/Trigonometric_functions
.. [2] https://dlmf.nist.gov/4.14
.. [3] https://functions.wolfram.com/ElementaryFunctions/Sin
.. [4] https://mathworld.wolfram.com/TrigonometryAngles.html
"""
def period(self, symbol=None):
return self._period(2*pi, symbol)
def fdiff(self, argindex=1):
if argindex == 1:
return cos(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.sets.setexpr import SetExpr
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg.is_zero:
return S.Zero
elif arg in (S.Infinity, S.NegativeInfinity):
return AccumBounds(-1, 1)
if arg is S.ComplexInfinity:
return S.NaN
if isinstance(arg, AccumBounds):
from sympy.sets.sets import FiniteSet
min, max = arg.min, arg.max
d = floor(min/(2*pi))
if min is not S.NegativeInfinity:
min = min - d*2*pi
if max is not S.Infinity:
max = max - d*2*pi
if AccumBounds(min, max).intersection(FiniteSet(pi/2, pi*Rational(5, 2))) \
is not S.EmptySet and \
AccumBounds(min, max).intersection(FiniteSet(pi*Rational(3, 2),
pi*Rational(7, 2))) is not S.EmptySet:
return AccumBounds(-1, 1)
elif AccumBounds(min, max).intersection(FiniteSet(pi/2, pi*Rational(5, 2))) \
is not S.EmptySet:
return AccumBounds(Min(sin(min), sin(max)), 1)
elif AccumBounds(min, max).intersection(FiniteSet(pi*Rational(3, 2), pi*Rational(8, 2))) \
is not S.EmptySet:
return AccumBounds(-1, Max(sin(min), sin(max)))
else:
return AccumBounds(Min(sin(min), sin(max)),
Max(sin(min), sin(max)))
elif isinstance(arg, SetExpr):
return arg._eval_func(cls)
if arg.could_extract_minus_sign():
return -cls(-arg)
i_coeff = _imaginary_unit_as_coefficient(arg)
if i_coeff is not None:
from sympy.functions.elementary.hyperbolic import sinh
return S.ImaginaryUnit*sinh(i_coeff)
pi_coeff = _pi_coeff(arg)
if pi_coeff is not None:
if pi_coeff.is_integer:
return S.Zero
if (2*pi_coeff).is_integer:
# is_even-case handled above as then pi_coeff.is_integer,
# so check if known to be not even
if pi_coeff.is_even is False:
return S.NegativeOne**(pi_coeff - S.Half)
if not pi_coeff.is_Rational:
narg = pi_coeff*pi
if narg != arg:
return cls(narg)
return None
# https://github.com/sympy/sympy/issues/6048
# transform a sine to a cosine, to avoid redundant code
if pi_coeff.is_Rational:
x = pi_coeff % 2
if x > 1:
return -cls((x % 1)*pi)
if 2*x > 1:
return cls((1 - x)*pi)
narg = ((pi_coeff + Rational(3, 2)) % 2)*pi
result = cos(narg)
if not isinstance(result, cos):
return result
if pi_coeff*pi != arg:
return cls(pi_coeff*pi)
return None
if arg.is_Add:
x, m = _peeloff_pi(arg)
if m:
m = m*pi
return sin(m)*cos(x) + cos(m)*sin(x)
if arg.is_zero:
return S.Zero
if isinstance(arg, asin):
return arg.args[0]
if isinstance(arg, atan):
x = arg.args[0]
return x/sqrt(1 + x**2)
if isinstance(arg, atan2):
y, x = arg.args
return y/sqrt(x**2 + y**2)
if isinstance(arg, acos):
x = arg.args[0]
return sqrt(1 - x**2)
if isinstance(arg, acot):
x = arg.args[0]
return 1/(sqrt(1 + 1/x**2)*x)
if isinstance(arg, acsc):
x = arg.args[0]
return 1/x
if isinstance(arg, asec):
x = arg.args[0]
return sqrt(1 - 1/x**2)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return -p*x**2/(n*(n - 1))
else:
return S.NegativeOne**(n//2)*x**n/factorial(n)
def _eval_nseries(self, x, n, logx, cdir=0):
arg = self.args[0]
if logx is not None:
arg = arg.subs(log(x), logx)
if arg.subs(x, 0).has(S.NaN, S.ComplexInfinity):
raise PoleError("Cannot expand %s around 0" % (self))
return super()._eval_nseries(x, n=n, logx=logx, cdir=cdir)
def _eval_rewrite_as_exp(self, arg, **kwargs):
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
I = S.ImaginaryUnit
if isinstance(arg, (TrigonometricFunction, HyperbolicFunction)):
arg = arg.func(arg.args[0]).rewrite(exp)
return (exp(arg*I) - exp(-arg*I))/(2*I)
def _eval_rewrite_as_Pow(self, arg, **kwargs):
if isinstance(arg, log):
I = S.ImaginaryUnit
x = arg.args[0]
return I*x**-I/2 - I*x**I /2
def _eval_rewrite_as_cos(self, arg, **kwargs):
return cos(arg - pi/2, evaluate=False)
def _eval_rewrite_as_tan(self, arg, **kwargs):
tan_half = tan(S.Half*arg)
return 2*tan_half/(1 + tan_half**2)
def _eval_rewrite_as_sincos(self, arg, **kwargs):
return sin(arg)*cos(arg)/cos(arg)
def _eval_rewrite_as_cot(self, arg, **kwargs):
cot_half = cot(S.Half*arg)
return Piecewise((0, And(Eq(im(arg), 0), Eq(Mod(arg, pi), 0))),
(2*cot_half/(1 + cot_half**2), True))
def _eval_rewrite_as_pow(self, arg, **kwargs):
return self.rewrite(cos, **kwargs).rewrite(pow, **kwargs)
def _eval_rewrite_as_sqrt(self, arg, **kwargs):
return self.rewrite(cos, **kwargs).rewrite(sqrt, **kwargs)
def _eval_rewrite_as_csc(self, arg, **kwargs):
return 1/csc(arg)
def _eval_rewrite_as_sec(self, arg, **kwargs):
return 1/sec(arg - pi/2, evaluate=False)
def _eval_rewrite_as_sinc(self, arg, **kwargs):
return arg*sinc(arg)
def _eval_rewrite_as_besselj(self, arg, **kwargs):
from sympy.functions.special.bessel import besselj
return sqrt(pi*arg/2)*besselj(S.Half, arg)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy.functions.elementary.hyperbolic import cosh, sinh
re, im = self._as_real_imag(deep=deep, **hints)
return (sin(re)*cosh(im), cos(re)*sinh(im))
def _eval_expand_trig(self, **hints):
from sympy.functions.special.polynomials import chebyshevt, chebyshevu
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
# TODO: Do this more efficiently for more than two terms
x, y = arg.as_two_terms()
sx = sin(x, evaluate=False)._eval_expand_trig()
sy = sin(y, evaluate=False)._eval_expand_trig()
cx = cos(x, evaluate=False)._eval_expand_trig()
cy = cos(y, evaluate=False)._eval_expand_trig()
return sx*cy + sy*cx
elif arg.is_Mul:
n, x = arg.as_coeff_Mul(rational=True)
if n.is_Integer: # n will be positive because of .eval
# canonicalization
# See https://mathworld.wolfram.com/Multiple-AngleFormulas.html
if n.is_odd:
return S.NegativeOne**((n - 1)/2)*chebyshevt(n, sin(x))
else:
return expand_mul(S.NegativeOne**(n/2 - 1)*cos(x)*
chebyshevu(n - 1, sin(x)), deep=False)
return sin(arg)
def _eval_as_leading_term(self, x, logx, cdir):
from sympy.calculus.accumulationbounds import AccumBounds
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
n = x0/pi
if n.is_integer:
lt = (arg - n*pi).as_leading_term(x)
return (S.NegativeOne**n)*lt
if x0 is S.ComplexInfinity:
x0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
if x0 in [S.Infinity, S.NegativeInfinity]:
return AccumBounds(-1, 1)
return self.func(x0) if x0.is_finite else self
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_extended_real:
return True
def _eval_is_zero(self):
rest, pi_mult = _peeloff_pi(self.args[0])
if rest.is_zero:
return pi_mult.is_integer
def _eval_is_complex(self):
if self.args[0].is_extended_real \
or self.args[0].is_complex:
return True
| sin |
python | geekcomputers__Python | nitkarshchourasia/to_sort/GUI_apps/tkinter_apps/simple_calc_GUI/simple_calculator_GUI.py | {
"start": 7404,
"end": 10683
} | class ____:
def __init__(self):
# Author Information
self.author_name = "Nitkarsh Chourasia"
self.author_email = "playnitkarsh@gmail.com"
self.gh_profile_url = "https://github.com/NitkarshChourasia"
self.gh_username = "NitkarshChourasia"
# Project Information
self.project_name = "Simple Calculator"
self.project_description = (
"A simple calculator app made using Python and Tkinter."
)
self.project_creation_date = "30-09-2023"
self.project_version = "1.0.0"
# Edits
self.original_author = "Nitkarsh Chourasia"
self.original_author_email = "playnitkarsh@gmail.com"
self.last_edit_date = "30-09-2023"
self.last_edit_author = "Nitkarsh Chourasia"
self.last_edit_author_email = "playnitkarsh@gmail.com"
self.last_edit_author_gh_profile_url = "https://github.com/NitkarshChourasia"
self.last_edit_author_gh_username = "NitkarshChourasia"
def display_author_info(self):
"""Display author information."""
print(f"Author Name: {self.author_name}")
print(f"Author Email: {self.author_email}")
print(f"GitHub Profile URL: {self.gh_profile_url}")
print(f"GitHub Username: {self.gh_username}")
def display_project_info(self):
"""Display project information."""
print(f"Project Name: {self.project_name}")
print(f"Project Description: {self.project_description}")
print(f"Project Creation Date: {self.project_creation_date}")
print(f"Project Version: {self.project_version}")
def display_edit_info(self):
"""Display edit information."""
print(f"Original Author: {self.original_author}")
print(f"Original Author Email: {self.original_author_email}")
print(f"Last Edit Date: {self.last_edit_date}")
print(f"Last Edit Author: {self.last_edit_author}")
print(f"Last Edit Author Email: {self.last_edit_author_email}")
print(
f"Last Edit Author GitHub Profile URL: {self.last_edit_author_gh_profile_url}"
)
print(f"Last Edit Author GitHub Username: {self.last_edit_author_gh_username}")
def open_github_profile(self) -> None:
"""Open the author's GitHub profile in a new tab."""
import webbrowser
return webbrowser.open_new_tab(self.gh_profile_url)
if __name__ == "__main__":
# start_reloader()
main()
# # Example usage:
# metadata = Metadata()
# # Display author information
# metadata.display_author_info()
# # Display project information
# metadata.display_project_info()
# # Display edit information
# metadata.display_edit_info()
# TODO: More features to add:
# Responsive design is not there.
# The program is not OOP based, there is lots and lots of repetitions.
# Bigger fonts.
# Adjustable everything.
# Default size, launch, but customizable.
# Adding history.
# Being able to continuosly operate on a number.
# What is the error here, see to it.
# To add Author Metadata.
# TODO: More features will be added, soon.
# Working.
# Perfect.
# Complete.
# Do not remove the comments, they make the program understandable.
# Thank you. :) ❤️
# Made with ❤️
| Metadata |
python | doocs__leetcode | solution/0900-0999/0990.Satisfiability of Equality Equations/Solution.py | {
"start": 0,
"end": 560
} | class ____:
def equationsPossible(self, equations: List[str]) -> bool:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
p = list(range(26))
for e in equations:
a, b = ord(e[0]) - ord('a'), ord(e[-1]) - ord('a')
if e[1] == '=':
p[find(a)] = find(b)
for e in equations:
a, b = ord(e[0]) - ord('a'), ord(e[-1]) - ord('a')
if e[1] == '!' and find(a) == find(b):
return False
return True
| Solution |
python | django__django | tests/get_or_create/models.py | {
"start": 31,
"end": 282
} | class ____(models.Model):
first_name = models.CharField(max_length=100, unique=True)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
defaults = models.TextField()
create_defaults = models.TextField()
| Person |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1045969,
"end": 1046404
} | class ____(Field):
"""
RepeatRef schema wrapper.
Reference to a repeated value.
Parameters
----------
repeat : Literal['row', 'column', 'repeat', 'layer']
"""
_schema = {"$ref": "#/definitions/RepeatRef"}
def __init__(
self,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
**kwds,
):
super().__init__(repeat=repeat, **kwds)
| RepeatRef |
python | donnemartin__interactive-coding-challenges | linked_lists/add_reverse/test_add_reverse.py | {
"start": 18,
"end": 1482
} | class ____(unittest.TestCase):
def test_add_reverse(self):
print('Test: Empty list(s)')
self.assertEqual(MyLinkedList().add_reverse(None, None), None)
self.assertEqual(MyLinkedList().add_reverse(Node(5), None), None)
self.assertEqual(MyLinkedList().add_reverse(None, Node(10)), None)
print('Test: Add values of different lengths')
# Input 1: 6->5->None
# Input 2: 9->8->7
# Result: 5->4->8
first_list = MyLinkedList(Node(6))
first_list.append(5)
second_list = MyLinkedList(Node(9))
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
self.assertEqual(result.get_all_data(), [5, 4, 8])
print('Test: Add values of same lengths')
# Input 1: 6->5->4
# Input 2: 9->8->7
# Result: 5->4->2->1
first_head = Node(6)
first_list = MyLinkedList(first_head)
first_list.append(5)
first_list.append(4)
second_head = Node(9)
second_list = MyLinkedList(second_head)
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
self.assertEqual(result.get_all_data(), [5, 4, 2, 1])
print('Success: test_add_reverse')
def main():
test = TestAddReverse()
test.test_add_reverse()
if __name__ == '__main__':
main()
| TestAddReverse |
python | pytorch__pytorch | test/profiler/test_memory_profiler.py | {
"start": 1521,
"end": 1780
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.scale = torch.nn.Parameter(torch.rand(()), requires_grad=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x * self.scale
| ScaleLayer |
python | coleifer__peewee | tests/pool.py | {
"start": 1670,
"end": 10795
} | class ____(BaseTestCase):
def setUp(self):
super(TestPooledDatabase, self).setUp()
self.db = FakePooledDatabase('testing')
def test_connection_pool(self):
# Closing and reopening a connection returns us the same conn.
self.assertEqual(self.db.connection(), 1)
self.assertEqual(self.db.connection(), 1)
self.db.close()
self.db.connect()
self.assertEqual(self.db.connection(), 1)
def test_reuse_connection(self):
# Verify the connection pool correctly handles calling connect twice.
self.assertEqual(self.db.connection(), 1)
self.assertRaises(OperationalError, self.db.connect)
self.assertFalse(self.db.connect(reuse_if_open=True))
self.assertEqual(self.db.connection(), 1)
self.db.close()
self.db.connect()
self.assertEqual(self.db.connection(), 1)
def test_concurrent_connections(self):
db = FakePooledDatabase('testing')
signal = threading.Event()
def open_conn():
db.connect()
signal.wait()
db.close()
# Simulate 5 concurrent connections.
threads = [threading.Thread(target=open_conn) for i in range(5)]
for thread in threads:
thread.start()
# Wait for all connections to be opened.
while db.counter < 5:
time.sleep(.01)
# Signal threads to close connections and join threads.
signal.set()
for t in threads: t.join()
self.assertEqual(db.counter, 5)
self.assertEqual(
sorted([conn for _, _, conn in db._connections]),
[1, 2, 3, 4, 5]) # All 5 are ready to be re-used.
self.assertEqual(db._in_use, {})
def test_max_conns(self):
for i in range(self.db._max_connections):
self.db._state.closed = True # Hack to make it appear closed.
self.db.connect()
self.assertEqual(self.db.connection(), i + 1)
self.db._state.closed = True
self.assertRaises(ValueError, self.db.connect)
def test_stale_timeout(self):
# Create a test database with a very short stale timeout.
db = FakePooledDatabase('testing', stale_timeout=.001)
self.assertEqual(db.connection(), 1)
self.assertTrue(1 in db._in_use)
# Sleep long enough for the connection to be considered stale.
time.sleep(.001)
# When we close, since the conn is stale it won't be returned to
# the pool.
db.close()
self.assertEqual(db._in_use, {})
self.assertEqual(db._connections, [])
# A new connection will be returned.
self.assertEqual(db.connection(), 2)
def test_stale_on_checkout(self):
# Create a test database with a very short stale timeout.
db = FakePooledDatabase('testing', stale_timeout=.005)
self.assertEqual(db.connection(), 1)
self.assertTrue(1 in db._in_use)
# When we close, the conn should not be stale so it won't return to
# the pool.
db.close()
assert len(db._connections) == 1, 'Test runner too slow!'
# Sleep long enough for the connection to be considered stale.
time.sleep(.005)
self.assertEqual(db._in_use, {})
self.assertEqual(len(db._connections), 1)
# A new connection will be returned, as the original one is stale.
# The stale connection (1) will be removed.
self.assertEqual(db.connection(), 2)
def test_manual_close(self):
self.assertEqual(self.db.connection(), 1)
self.db.manual_close()
# When we manually close a connection that's not yet stale, we add it
# back to the queue (because close() calls _close()), then close it
# for real, and mark it with a tombstone. The next time it's checked
# out, it will simply be removed and skipped over.
self.assertEqual(len(self.db._connections), 0)
self.assertEqual(self.db._in_use, {})
self.assertEqual(self.db.connection(), 2)
self.assertEqual(len(self.db._connections), 0)
self.assertEqual(list(self.db._in_use.keys()), [2])
self.db.close()
self.assertEqual(self.db.connection(), 2)
def test_close_idle(self):
db = FakePooledDatabase('testing', counter=3)
now = time.time()
heapq.heappush(db._connections, (now - 10, None, 3))
heapq.heappush(db._connections, (now - 5, None, 2))
heapq.heappush(db._connections, (now - 1, None, 1))
self.assertEqual(db.connection(), 3)
self.assertTrue(3 in db._in_use)
db.close_idle()
self.assertEqual(len(db._connections), 0)
self.assertEqual(len(db._in_use), 1)
self.assertTrue(3 in db._in_use)
self.assertEqual(db.connection(), 3)
db.manual_close()
self.assertEqual(db.connection(), 4)
def test_close_stale(self):
db = FakePooledDatabase('testing', counter=3)
now = time.time()
# Closing stale uses the last checkout time rather than the creation
# time for the connection.
db._in_use[1] = PoolConnection(now - 400, 1, now - 300)
db._in_use[2] = PoolConnection(now - 200, 2, now - 200)
db._in_use[3] = PoolConnection(now - 300, 3, now - 100)
db._in_use[4] = PoolConnection(now, 4, now)
self.assertEqual(db.close_stale(age=200), 2)
self.assertEqual(len(db._in_use), 2)
self.assertEqual(sorted(db._in_use), [3, 4])
def test_close_all(self):
db = FakePooledDatabase('testing', counter=3)
now = time.time()
heapq.heappush(db._connections, (now - 10, None, 3))
heapq.heappush(db._connections, (now - 5, None, 2))
heapq.heappush(db._connections, (now - 1, None, 1))
self.assertEqual(db.connection(), 3)
self.assertTrue(3 in db._in_use)
db.close_all()
self.assertEqual(len(db._connections), 0)
self.assertEqual(len(db._in_use), 0)
self.assertEqual(db.connection(), 4)
def test_stale_timeout_cascade(self):
now = time.time()
db = FakePooledDatabase('testing', stale_timeout=10)
conns = [
(now - 20, None, 1),
(now - 15, None, 2),
(now - 5, None, 3),
(now, None, 4),
]
for ts_conn in conns:
heapq.heappush(db._connections, ts_conn)
self.assertEqual(db.connection(), 3)
self.assertEqual(len(db._in_use), 1)
self.assertTrue(3 in db._in_use)
self.assertEqual(db._connections, [(now, None, 4)])
def test_connect_cascade(self):
now = time.time()
class ClosedPooledDatabase(FakePooledDatabase):
def _is_closed(self, conn):
return conn in (2, 4)
db = ClosedPooledDatabase('testing', stale_timeout=10)
conns = [
(now - 15, None, 1), # Skipped due to being stale.
(now - 5, None, 2), # Will appear closed.
(now - 3, None, 3),
(now, None, 4), # Will appear closed.
]
db.counter = 4 # The next connection we create will have id=5.
for ts_conn in conns:
heapq.heappush(db._connections, ts_conn)
# Conn 3 is not stale or closed, so we will get it.
self.assertEqual(db.connection(), 3)
self.assertEqual(len(db._in_use), 1)
self.assertTrue(3 in db._in_use)
pool_conn = db._in_use[3]
self.assertEqual(pool_conn.timestamp, now - 3)
self.assertEqual(pool_conn.connection, 3)
self.assertEqual(db._connections, [(now, None, 4)])
# Since conn 4 is closed, we will open a new conn.
db._state.closed = True # Pretend we're in a different thread.
db.connect()
self.assertEqual(db.connection(), 5)
self.assertEqual(sorted(db._in_use.keys()), [3, 5])
self.assertEqual(db._connections, [])
def test_db_context(self):
self.assertEqual(self.db.connection(), 1)
with self.db:
self.assertEqual(self.db.connection(), 1)
self.assertEqual(self.db.transaction_history, ['O1'])
self.assertEqual(self.db.connection(), 1)
self.assertEqual(self.db.transaction_history, ['O1', 'X1'])
with self.db:
self.assertEqual(self.db.connection(), 1)
self.assertEqual(len(self.db._connections), 1)
self.assertEqual(len(self.db._in_use), 0)
def test_db_context_threads(self):
signal = threading.Event()
def create_context():
with self.db:
signal.wait()
threads = [threading.Thread(target=create_context) for i in range(5)]
for thread in threads: thread.start()
while len(self.db.transaction_history) < 5:
time.sleep(.001)
signal.set()
for thread in threads: thread.join()
self.assertEqual(self.db.counter, 5)
self.assertEqual(len(self.db._connections), 5)
self.assertEqual(len(self.db._in_use), 0)
| TestPooledDatabase |
python | Lightning-AI__lightning | src/lightning/pytorch/demos/boring_classes.py | {
"start": 2340,
"end": 2764
} | class ____(IterableDataset):
"""
.. warning:: This is meant for testing/debugging and is experimental.
"""
def __init__(self, size: int, count: int):
self.count = count
self.size = size
def __iter__(self) -> Iterator[Tensor]:
for _ in range(len(self)):
yield torch.randn(self.size)
def __len__(self) -> int:
return self.count
| RandomIterableDatasetWithLen |
python | numba__numba | numba/cuda/tests/cudapy/test_exception.py | {
"start": 151,
"end": 5501
} | class ____(CUDATestCase):
def setUp(self):
super().setUp()
# LTO optimizes away the exception status due to an oversight
# in the way we generate it (it is not added to the used list).
self.skip_if_lto("Exceptions not supported with LTO")
def test_exception(self):
def foo(ary):
x = cuda.threadIdx.x
if x == 2:
# NOTE: indexing with a out-of-bounds constant can fail at
# compile-time instead (because the getitem is rewritten as a
# static_getitem)
ary.shape[-x]
unsafe_foo = cuda.jit(foo)
safe_foo = cuda.jit(debug=True, opt=False)(foo)
if not config.ENABLE_CUDASIM:
# Simulator throws exceptions regardless of debug
# setting
unsafe_foo[1, 3](np.array([0, 1]))
with self.assertRaises(IndexError) as cm:
safe_foo[1, 3](np.array([0, 1]))
self.assertIn("tuple index out of range", str(cm.exception))
def test_user_raise(self):
@cuda.jit(debug=True, opt=False)
def foo(do_raise):
if do_raise:
raise ValueError
foo[1, 1](False)
with self.assertRaises(ValueError):
foo[1, 1](True)
def case_raise_causing_warp_diverge(self, with_debug_mode):
"""Testing issue #2655.
Exception raising code can cause the compiler to miss location
of unifying branch target and resulting in unexpected warp
divergence.
"""
with_opt_mode = not with_debug_mode
@cuda.jit(debug=with_debug_mode, opt=with_opt_mode)
def problematic(x, y):
tid = cuda.threadIdx.x
ntid = cuda.blockDim.x
if tid > 12:
for i in range(ntid):
y[i] += x[i] // y[i]
cuda.syncthreads()
if tid < 17:
for i in range(ntid):
x[i] += x[i] // y[i]
@cuda.jit
def oracle(x, y):
tid = cuda.threadIdx.x
ntid = cuda.blockDim.x
if tid > 12:
for i in range(ntid):
if y[i] != 0:
y[i] += x[i] // y[i]
cuda.syncthreads()
if tid < 17:
for i in range(ntid):
if y[i] != 0:
x[i] += x[i] // y[i]
n = 32
got_x = 1. / (np.arange(n) + 0.01)
got_y = 1. / (np.arange(n) + 0.01)
problematic[1, n](got_x, got_y)
expect_x = 1. / (np.arange(n) + 0.01)
expect_y = 1. / (np.arange(n) + 0.01)
oracle[1, n](expect_x, expect_y)
np.testing.assert_almost_equal(expect_x, got_x)
np.testing.assert_almost_equal(expect_y, got_y)
def test_raise_causing_warp_diverge(self):
"""Test case for issue #2655.
"""
self.case_raise_causing_warp_diverge(with_debug_mode=False)
# The following two cases relate to Issue #7806: Division by zero stops the
# kernel. https://github.com/numba/numba/issues/7806.
def test_no_zero_division_error(self):
# When debug is False:
# - Division by zero raises no exception
# - Execution proceeds after a divide by zero
@cuda.jit
def f(r, x, y):
r[0] = y[0] / x[0]
r[1] = y[0]
r = np.zeros(2)
x = np.zeros(1)
y = np.ones(1)
f[1, 1](r, x, y)
self.assertTrue(np.isinf(r[0]), 'Expected inf from div by zero')
self.assertEqual(r[1], y[0], 'Expected execution to continue')
def test_zero_division_error_in_debug(self):
# When debug is True:
# - Zero by division raises an exception
# - Execution halts at the point of division by zero
@cuda.jit(debug=True, opt=False)
def f(r, x, y):
r[0] = y[0] / x[0]
r[1] = y[0]
r = np.zeros(2)
x = np.zeros(1)
y = np.ones(1)
# Simulator and device behaviour differs slightly in the exception
# raised - in debug mode, the CUDA target uses the Python error model,
# which gives a ZeroDivision error. The simulator uses NumPy with the
# error mode for division by zero set to raise, which results in a
# FloatingPointError instead.
if config.ENABLE_CUDASIM:
exc = FloatingPointError
else:
exc = ZeroDivisionError
with self.assertRaises(exc):
f[1, 1](r, x, y)
self.assertEqual(r[0], 0, 'Expected result to be left unset')
self.assertEqual(r[1], 0, 'Expected execution to stop')
@xfail_unless_cudasim
def test_raise_in_device_function(self):
# This is an expected failure because reporting of exceptions raised in
# device functions does not work correctly - see Issue #8036:
# https://github.com/numba/numba/issues/8036
msg = 'Device Function Error'
@cuda.jit(device=True)
def f():
raise ValueError(msg)
@cuda.jit(debug=True)
def kernel():
f()
with self.assertRaises(ValueError) as raises:
kernel[1, 1]()
self.assertIn(msg, str(raises.exception))
if __name__ == '__main__':
unittest.main()
| TestException |
python | Textualize__textual | docs/examples/app/suspend.py | {
"start": 126,
"end": 445
} | class ____(App[None]):
def compose(self) -> ComposeResult:
yield Button("Open the editor", id="edit")
@on(Button.Pressed, "#edit")
def run_external_editor(self) -> None:
with self.suspend(): # (1)!
system("vim")
if __name__ == "__main__":
SuspendingApp().run()
| SuspendingApp |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 6325,
"end": 6352
} | class ____(B15):
pass
| C15 |
python | PrefectHQ__prefect | tests/utilities/test_processutils.py | {
"start": 3173,
"end": 5110
} | class ____:
str_cmd = "ls -a"
list_cmd = ["ls", "-a"]
async def test_errors_if_cmd_is_not_list(self):
with pytest.raises(TypeError):
async with open_process(command=self.str_cmd):
pass
async def test_runs_if_cmd_is_list(self):
async with open_process(self.list_cmd) as process:
assert process
@pytest.mark.skipif(
sys.platform != "win32",
reason="CTRL_C_HANDLER is only defined in Windows",
)
async def test_adds_ctrl_c_handler_to_win32_process_group(self, monkeypatch):
"""
If the process is a Windows process group, we need to add a handler for
CTRL_C_EVENT to the process group so we can kill the process group
when the user presses CTRL+C.
"""
mock_ctrl_c_handler = mock.Mock()
monkeypatch.setattr(
prefect.utilities.processutils, "_win32_ctrl_handler", mock_ctrl_c_handler
)
mock_set_console_ctrl_handler = mock.Mock()
monkeypatch.setattr(
prefect.utilities.processutils.windll.kernel32,
"SetConsoleCtrlHandler",
mock_set_console_ctrl_handler,
)
mock_process = mock.AsyncMock()
mock_process.terminate = mock.MagicMock()
mock_open_process = mock.AsyncMock(return_value=mock_process)
monkeypatch.setattr(
prefect.utilities.processutils, "_open_anyio_process", mock_open_process
)
await prefect.utilities.processutils.run_process(
self.list_cmd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
)
mock_open_process.assert_called_once_with(
" ".join(self.list_cmd),
stdout=mock.ANY,
stderr=mock.ANY,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
mock_set_console_ctrl_handler.assert_called_once_with(mock_ctrl_c_handler, 1)
| TestOpenProcess |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/interfaces.py | {
"start": 14881,
"end": 16193
} | class ____(_DCAttributeOptions, _MappedAttribute[_T]):
"""interface for declarative-capable construct that delivers one or more
Column objects to the declarative process to be part of a Table.
"""
__slots__ = ()
@property
def mapper_property_to_assign(self) -> Optional[MapperProperty[_T]]:
"""return a MapperProperty to be assigned to the declarative mapping"""
raise NotImplementedError()
@property
def columns_to_assign(self) -> List[Tuple[Column[_T], int]]:
"""A list of Column objects that should be declaratively added to the
new Table object.
"""
raise NotImplementedError()
# NOTE: MapperProperty needs to extend _MappedAttribute so that declarative
# typing works, i.e. "Mapped[A] = relationship()". This introduces an
# inconvenience which is that all the MapperProperty objects are treated
# as descriptors by typing tools, which are misled by this as assignment /
# access to a descriptor attribute wants to move through __get__.
# Therefore, references to MapperProperty as an instance variable, such
# as in PropComparator, may have some special typing workarounds such as the
# use of sqlalchemy.util.typing.DescriptorReference to avoid mis-interpretation
# by typing tools
@inspection._self_inspects
| _MapsColumns |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 9821,
"end": 11155
} | class ____(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.self = MegatronBertSelfAttention(config, layer_idx=layer_idx)
self.output = MegatronBertSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor]:
ln_outputs = self.ln(hidden_states)
self_outputs = self.self(
ln_outputs,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->MegatronBert
| MegatronBertAttention |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 24468,
"end": 25490
} | class ____(Enum):
"""
Enum for indexing order.
"""
UNKNOWN = 0
INCREASING = 1
DECREASING = 2
UNORDERED = 3
@staticmethod
def check(a: npt.NDArray[Any]) -> Order:
diff = np.diff(a)
diff_positive = diff >= 0
n_diff_positive = np.count_nonzero(diff_positive)
all_increasing = n_diff_positive == len(diff_positive)
any_increasing = n_diff_positive > 0
if all_increasing:
order = Order.INCREASING
elif any_increasing:
order = Order.UNORDERED
else:
order = Order.DECREASING
return order
def wraparound_indices(x: npt.NDArray[Any], dim_len: int) -> None:
loc_neg = x < 0
if np.any(loc_neg):
x[loc_neg] += dim_len
def boundscheck_indices(x: npt.NDArray[Any], dim_len: int) -> None:
if np.any(x < 0) or np.any(x >= dim_len):
msg = f"index out of bounds for dimension with length {dim_len}"
raise BoundsCheckError(msg)
@dataclass(frozen=True)
| Order |
python | huggingface__transformers | tests/models/gpt_neo/test_modeling_gpt_neo.py | {
"start": 14690,
"end": 20058
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
GPTNeoModel,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": GPTNeoModel,
"question-answering": GPTNeoForQuestionAnswering,
"text-classification": GPTNeoForSequenceClassification,
"text-generation": GPTNeoForCausalLM,
"token-classification": GPTNeoForTokenClassification,
"zero-shot": GPTNeoForSequenceClassification,
}
if is_torch_available()
else {}
)
test_missing_keys = False
# special case for DoubleHeads model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
return inputs_dict
def setUp(self):
self.model_tester = GPTNeoModelTester(self)
self.config_tester = ConfigTester(self, config_class=GPTNeoConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_gpt_neo_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_model(*config_and_inputs)
def test_gpt_neo_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_model_past(*config_and_inputs)
def test_gpt_neo_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_model_attention_mask_past(*config_and_inputs)
def test_gpt_neo_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_model_past_large_inputs(*config_and_inputs)
def test_gpt_neo_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_gpt_neo_question_answering_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_for_question_answering(*config_and_inputs)
def test_gpt_neo_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_for_sequence_classification(*config_and_inputs)
def test_gpt_neo_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt_neo_for_token_classification(*config_and_inputs)
def test_gpt_neo_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def _get_hidden_states(self):
return torch.tensor(
[
[
[0.4983, -0.7584, -1.6944, 0.5440],
[2.6918, 0.4206, 0.4176, 0.2055],
[-0.0071, -0.0405, -1.4920, -0.3630],
[1.0492, 0.1599, -1.7648, 0.2419],
[-1.8348, 2.0514, -0.1946, 0.3203],
[0.7672, -1.1600, -1.7118, -0.9056],
[0.2986, 0.5372, 0.7729, -0.1927],
[0.0285, 0.2629, -1.1156, -1.1992],
]
],
dtype=torch.float32,
device=torch_device,
)
def test_local_attn_probs(self):
model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
layer = model.h[1].attn.attention.to(torch_device)
hidden_states = self._get_hidden_states()
hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)
batch_size, seq_length, _ = hidden_states.shape
mask_tokens = 2
attention_mask = torch.ones(batch_size, seq_length, device=torch_device, dtype=torch.long)
attention_mask[:, -mask_tokens:] = 0 # dont attend last mask_tokens
attention_mask = attention_mask.view(batch_size, -1)
attention_mask = attention_mask[:, None, None, :]
attention_mask = (1.0 - attention_mask) * -10000.0
attn_probs = layer(hidden_states, attention_mask=attention_mask, output_attentions=True)[-1]
# the last 2 tokens are masked, and should have 0 attn_probs
self.assertTrue(torch.all(attn_probs[:, :, -mask_tokens:, -mask_tokens:] == 0))
# in local attention each token can only attend to the previous window_size tokens (including itself)
# here window_size is 4, so a token at index 5 can only attend to indices [2, 3, 4, 5]
# and the attn_probs should be 0 for token [0, 1]
self.assertTrue(torch.all(attn_probs[:, :, 5, 2:6] != 0))
self.assertTrue(torch.all(attn_probs[:, :, 5, :2] == 0))
@require_torch
| GPTNeoModelTest |
python | ipython__ipython | IPython/sphinxext/ipython_directive.py | {
"start": 11379,
"end": 34035
} | class ____:
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None):
self.cout = StringIO()
if exec_lines is None:
exec_lines = []
# Create config object for IPython
config = Config()
config.HistoryManager.hist_file = ':memory:'
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = "nocolor"
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
atexit.register(self.cleanup)
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.tmp_profile_dir = tmp_profile_dir
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
# this is assigned by the SetUp method of IPythonDirective
# to point at itself.
#
# So, you can access handy things at self.directive.state
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def cleanup(self):
shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history):
return self.process_input_lines([line], store_history=store_history)
def process_input_lines(self, lines, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
source_raw = '\n'.join(lines)
try:
sys.stdout = self.cout
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
# as absolute path for Sphinx
# sphinx expects a posix path, even on Windows
path = pathlib.Path(savefig_dir, filename)
outfile = '/' + path.relative_to(source_dir).as_posix()
imagerows = ['.. image:: %s' % outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
if input_lines[0].endswith(';'):
is_semicolon = True
#for i, line in enumerate(input_lines):
# process the first input line
if is_verbatim:
self.process_input_lines([''])
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_lines(input_lines, store_history=store_history)
if not is_suppress:
for i, line in enumerate(input_lines):
if i == 0:
formatted_line = '%s %s'%(input_prompt, line)
else:
formatted_line = '%s %s'%(continuation, line)
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# The "rest" is the standard output of the input. This needs to be
# added when in verbatim mode. If there is no "rest", then we don't
# add it, as the new line will be added by the processed output.
ret.append(rest)
# Fetch the processed output. (This is not the submitted output.)
self.cout.seek(0)
processed_output = self.cout.read()
if not is_suppress and not is_semicolon:
#
# In IPythonDirective.run, the elements of `ret` are eventually
# combined such that '' entries correspond to newlines. So if
# `processed_output` is equal to '', then the adding it to `ret`
# ensures that there is a blank line between consecutive inputs
# that have no outputs, as in:
#
# In [1]: x = 4
#
# In [2]: x = 5
#
# When there is processed output, it has a '\n' at the tail end. So
# adding the output to `ret` will provide the necessary spacing
# between consecutive input/output blocks, as in:
#
# In [1]: x
# Out[1]: 5
#
# In [2]: x
# Out[2]: 5
#
# When there is stdout from the input, it also has a '\n' at the
# tail end, and so this ensures proper spacing as well. E.g.:
#
# In [1]: print(x)
# 5
#
# In [2]: x = 5
#
# When in verbatim mode, `processed_output` is empty (because
# nothing was passed to IP. Sometimes the submitted code block has
# an Out[] portion and sometimes it does not. When it does not, we
# need to ensure proper spacing, so we have to add '' to `ret`.
# However, if there is an Out[] in the submitted code, then we do
# not want to add a newline as `process_output` has stuff to add.
# The difficulty is that `process_input` doesn't know if
# `process_output` will be called---so it doesn't know if there is
# Out[] in the code block. The requires that we include a hack in
# `process_block`. See the comments there.
#
ret.append(processed_output)
elif is_semicolon:
# Make sure there is a newline after the semicolon.
ret.append('')
# context information
filename = "Unknown"
lineno = 0
if self.directive.state:
filename = self.directive.state.document.current_source
lineno = self.directive.state.document.current_line
# Use sphinx logger for warnings
logger = logging.getLogger(__name__)
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and (
("Traceback" in processed_output) or ("SyntaxError" in processed_output)
):
s = "\n>>>" + ("-" * 73) + "\n"
s += "Exception in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
s += processed_output + "\n"
s += "<<<" + ("-" * 73)
logger.warning(s)
if self.warning_is_error:
raise RuntimeError(
"Unexpected exception in `{}` line {}".format(filename, lineno)
)
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\n>>>" + ("-" * 73) + "\n"
s += "Warning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
s += ("-" * 76) + "\n"
s += warnings.formatwarning(
w.message, w.category, w.filename, w.lineno, w.line
)
s += "<<<" + ("-" * 73)
logger.warning(s)
if self.warning_is_error:
raise RuntimeError(
"Unexpected warning in `{}` line {}".format(filename, lineno)
)
self.clear_cout()
return (ret, input_lines, processed_output,
is_doctest, decorator, image_file, image_directive)
def process_output(self, data, output_prompt, input_lines, output,
is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
# Recall: `data` is the submitted output, and `output` is the processed
# output from `input_lines`.
TAB = ' ' * 4
if is_doctest and output is not None:
found = output # This is the processed output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
# When in verbatim mode, this holds additional submitted output
# to be written in the final Sphinx output.
# https://github.com/ipython/ipython/issues/5776
out_data = []
is_verbatim = decorator=='@verbatim' or self.is_verbatim
if is_verbatim and data.strip():
# Note that `ret` in `process_block` has '' as its last element if
# the code block was in verbatim mode. So if there is no submitted
# output, then we will have proper spacing only if we do not add
# an additional '' to `out_data`. This is why we condition on
# `and data.strip()`.
# The submitted output has no output prompt. If we want the
# prompt and the code to appear, we need to join them now
# instead of adding them separately---as this would create an
# undesired newline. How we do this ultimately depends on the
# format of the output regex. I'll do what works for the default
# prompt for now, and we might have to adjust if it doesn't work
# in other cases. Finally, the submitted output does not have
# a trailing newline, so we must add it manually.
out_data.append("{0} {1}\n".format(output_prompt, data))
return out_data
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
# print('SAVEFIG', command) # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle pseudo-decorators, whilst ensuring real python decorators are treated as input
if any(
line_stripped.startswith("@" + pseudo_decorator)
for pseudo_decorator in PSEUDO_DECORATORS
):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
| EmbeddedSphinxShell |
python | PyCQA__pylint | doc/data/messages/o/overridden-final-method/good.py | {
"start": 101,
"end": 164
} | class ____(Animal):
def can_purr(self):
return True
| Cat |
python | ansible__ansible | lib/ansible/modules/service.py | {
"start": 6898,
"end": 17195
} | class ____(object):
"""
This is the generic Service manipulation class that is subclassed
based on platform.
A subclass should override the following action methods:-
- get_service_tools
- service_enable
- get_service_status
- service_control
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(Service)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.state = module.params['state']
self.sleep = module.params['sleep']
self.pattern = module.params['pattern']
self.enable = module.params['enabled']
self.runlevel = module.params['runlevel']
self.changed = False
self.running = None
self.crashed = None
self.action = None
self.svc_cmd = None
self.svc_initscript = None
self.svc_initctl = None
self.enable_cmd = None
self.arguments = module.params.get('arguments', '')
self.rcconf_file = None
self.rcconf_key = None
self.rcconf_value = None
self.svc_change = False
# ===========================================
# Platform specific methods (must be replaced by subclass).
def get_service_tools(self):
self.module.fail_json(msg="get_service_tools not implemented on target platform")
def service_enable(self):
self.module.fail_json(msg="service_enable not implemented on target platform")
def get_service_status(self):
self.module.fail_json(msg="get_service_status not implemented on target platform")
def service_control(self):
self.module.fail_json(msg="service_control not implemented on target platform")
# ===========================================
# Generic methods that should be used on all platforms.
def execute_command(self, cmd, daemonize=False):
locale = get_best_parsable_locale(self.module)
lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
# Most things don't need to be daemonized
if not daemonize:
# chkconfig localizes messages and we're screen scraping so make
# sure we use the C locale
return self.module.run_command(cmd, environ_update=lang_env)
# This is complex because daemonization is hard for people.
# What we do is daemonize a part of this module, the daemon runs the
# command, picks up the return code and output, and returns it to the
# main process.
pipe = os.pipe()
pid = os.fork()
if pid == 0:
os.close(pipe[0])
# Set stdin/stdout/stderr to /dev/null
fd = os.open(os.devnull, os.O_RDWR)
if fd != 0:
os.dup2(fd, 0)
if fd != 1:
os.dup2(fd, 1)
if fd != 2:
os.dup2(fd, 2)
if fd not in (0, 1, 2):
os.close(fd)
# Make us a daemon. Yes, that's all it takes.
pid = os.fork()
if pid > 0:
os._exit(0)
os.setsid()
os.chdir("/")
pid = os.fork()
if pid > 0:
os._exit(0)
# Start the command
cmd = to_text(cmd, errors='surrogate_or_strict')
cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
# In either of the above cases, pass a list of byte strings to Popen
# chkconfig localizes messages and we're screen scraping so make
# sure we use the C locale
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=lang_env, preexec_fn=lambda: os.close(pipe[1]))
stdout = b""
stderr = b""
fds = [p.stdout, p.stderr]
# Wait for all output, or until the main process is dead and its output is done.
while fds:
rfd, wfd, efd = select.select(fds, [], fds, 1)
if not (rfd + wfd + efd) and p.poll() is not None:
break
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 4096)
if not dat:
fds.remove(p.stdout)
stdout += dat
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), 4096)
if not dat:
fds.remove(p.stderr)
stderr += dat
p.wait()
# Return a JSON blob to parent
blob = json.dumps([p.returncode, to_text(stdout), to_text(stderr)])
os.write(pipe[1], to_bytes(blob, errors='surrogate_or_strict'))
os.close(pipe[1])
os._exit(0)
elif pid == -1:
self.module.fail_json(msg="unable to fork")
else:
os.close(pipe[1])
os.waitpid(pid, 0)
# Wait for data from daemon process and process it.
data = b""
while True:
rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
if pipe[0] in rfd:
dat = os.read(pipe[0], 4096)
if not dat:
break
data += dat
return json.loads(to_text(data, errors='surrogate_or_strict'))
def check_ps(self):
# Set ps flags
if platform.system() == 'SunOS':
psflags = '-ef'
else:
psflags = 'auxww'
# Find ps binary
psbin = self.module.get_bin_path('ps', True)
(rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
# If rc is 0, set running as appropriate
if rc == 0:
self.running = False
lines = psout.split("\n")
for line in lines:
if self.pattern in line and "pattern=" not in line:
# so as to not confuse ./hacking/test-module.py
self.running = True
break
def check_service_changed(self):
if self.state and self.running is None:
self.module.fail_json(msg="failed determining service state, possible typo of service name?")
# Find out if state has changed
if not self.running and self.state in ["reloaded", "started"]:
self.svc_change = True
elif self.running and self.state in ["reloaded", "stopped"]:
self.svc_change = True
elif self.state == "restarted":
self.svc_change = True
if self.module.check_mode and self.svc_change:
self.module.exit_json(changed=True, msg='service state changed')
def modify_service_state(self):
# Only do something if state will change
if self.svc_change:
# Control service
if self.state in ['started']:
self.action = "start"
elif not self.running and self.state == 'reloaded':
self.action = "start"
elif self.state == 'stopped':
self.action = "stop"
elif self.state == 'reloaded':
self.action = "reload"
elif self.state == 'restarted':
self.action = "restart"
if self.module.check_mode:
self.module.exit_json(changed=True, msg='changing service state')
return self.service_control()
else:
# If nothing needs to change just say all is well
rc = 0
err = ''
out = ''
return rc, out, err
def service_enable_rcconf(self):
if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")
self.changed = None
entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
with open(self.rcconf_file, "r") as RCFILE:
new_rc_conf = []
# Build a list containing the possibly modified file.
for rcline in RCFILE:
# Parse line removing whitespaces, quotes, etc.
rcarray = shlex.split(rcline, comments=True)
if len(rcarray) >= 1 and '=' in rcarray[0]:
(key, value) = rcarray[0].split("=", 1)
if key == self.rcconf_key:
if value.upper() == self.rcconf_value:
# Since the proper entry already exists we can stop iterating.
self.changed = False
break
else:
# We found the key but the value is wrong, replace with new entry.
rcline = entry
self.changed = True
# Add line to the list.
new_rc_conf.append(rcline.strip() + '\n')
# If we did not see any trace of our entry we need to add it.
if self.changed is None:
new_rc_conf.append(entry)
self.changed = True
if self.changed is True:
if self.module.check_mode:
self.module.exit_json(changed=True, msg="changing service enablement")
# Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
# This way the replacement operation is atomic.
rcconf_dir = os.path.dirname(self.rcconf_file)
rcconf_base = os.path.basename(self.rcconf_file)
(TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)
# Write out the contents of the list into our temporary file.
for rcline in new_rc_conf:
os.write(TMP_RCCONF, rcline.encode())
# Close temporary file.
os.close(TMP_RCCONF)
# Replace previous rc.conf.
self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
| Service |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 6032,
"end": 6201
} | class ____(TestAtomFeed):
"""
A feed with naive (non-timezone-aware) dates.
"""
def item_pubdate(self, item):
return item.published
| NaiveDatesFeed |
python | mitmproxy__pdoc | test/testdata/typed_dict.py | {
"start": 31,
"end": 100
} | class ____(TypedDict):
a: int | None
"""First attribute."""
| Foo |
python | celery__celery | t/unit/concurrency/test_prefork.py | {
"start": 5413,
"end": 5479
} | class ____(mp.TaskPool):
Pool = BlockingPool = MockPool
| TaskPool |
python | pappasam__jedi-language-server | tests/test_data/symbol/symbol_test1.py | {
"start": 238,
"end": 495
} | class ____:
"""Class for symbols test."""
def __init__(self, arg1: Any):
self.somedata = arg1
def do_something(self):
"""Method for symbols test."""
def so_something_else(self):
"""Method for symbols test."""
| SomeClass |
python | doocs__leetcode | solution/0500-0599/0517.Super Washing Machines/Solution.py | {
"start": 0,
"end": 320
} | class ____:
def findMinMoves(self, machines: List[int]) -> int:
n = len(machines)
k, mod = divmod(sum(machines), n)
if mod:
return -1
ans = s = 0
for x in machines:
x -= k
s += x
ans = max(ans, abs(s), x)
return ans
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_text_editor_code_execution_tool_result_error.py | {
"start": 247,
"end": 557
} | class ____(BaseModel):
error_code: Literal[
"invalid_tool_input", "unavailable", "too_many_requests", "execution_time_exceeded", "file_not_found"
]
error_message: Optional[str] = None
type: Literal["text_editor_code_execution_tool_result_error"]
| BetaTextEditorCodeExecutionToolResultError |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 8927,
"end": 9163
} | class ____(Tokenizer):
_tokens = (SYNTAX, ARGUMENT)
def _tokenize(self, value, index):
if index == 0 and not self._is_assign(value):
return ERROR
return Tokenizer._tokenize(self, value, index)
| Variable |
python | pytorch__pytorch | test/jit/test_with.py | {
"start": 448,
"end": 19775
} | class ____(JitTestCase):
"""
A suite of tests for with statements.
"""
def test_with_as(self):
"""
Check that with statements that use the 'as' keyword to bind expressions
to targets work as expected.
"""
@torch.jit.script
class Context:
"""
This class implements a basic context manager interface for use in
the unit tests. Unlike Context, the stateful part of this class
is a Tensor that is mutated in-place so that modifications made in the
JIT interpreter are visible outside of it.
"""
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any) -> bool:
self.count.sub_(0.3)
return True
make_global(Context)
def test_basic(x: torch.Tensor) -> torch.Tensor:
"""Basic test with one with-statement."""
c = Context(1)
with c as mult:
y = x + mult
y *= c.count
return y
def test_pass(x: torch.Tensor) -> torch.Tensor:
"""
Test with a pass statement inside a with-statement. Although
the body of the with is empty, __enter__ and __exit__ should
still be called.
"""
c = Context(1)
with c as mult:
pass
x *= c.count
return x
def test_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that returning early from inside a with-statement works
as expected.
"""
with c as mult:
y = x + mult
return y
x = y + y
return x
def test_conditional_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that conditionally returning early from inside a with-statement works
as expected.
"""
with c as mult:
y = x + mult
if mult > 0:
return y
x = y + y
return x
def test_break(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that breaking early from inside a with-statement works
as expected.
"""
with c as mult:
for a in l:
if a == 0:
break
x += a * mult
return x
def test_continue(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that using continue inside a with-statement works
as expected.
"""
with c as mult:
for a in l:
if a == 0:
continue
x += a * mult
return x
def test_serial(x: torch.Tensor) -> torch.Tensor:
"""
Test two with-statements in a row.
"""
c = Context(1)
with c as mult:
y = x + mult
with c as mult:
y *= mult
return y
def test_nested(x: torch.Tensor) -> torch.Tensor:
"""
Test nested with-statements.
"""
c = Context(1)
with c as m:
with c as n:
y = x + n
y *= m
return y
def test_combined(x: torch.Tensor) -> torch.Tensor:
"""
Test a with-statement with multiple with items.
"""
c = Context(1)
d = Context(2)
with c as m, d as n:
y = x + (m + n)
return y
test_input = torch.randn(2, 2)
test_context = Context(2)
test_list = [2, 0, 1, 3, 0, 2]
self.checkScript(test_basic, (test_input,))
self.checkScript(test_pass, (test_input,))
self.checkScript(test_early_return, (test_input, test_context))
self.checkScript(test_break, (test_input, test_context, test_list))
self.checkScript(test_continue, (test_input, test_context, test_list))
self.assertEqual(test_context.count, 2)
self.checkScript(test_serial, (test_input,))
self.checkScript(test_nested, (test_input,))
self.checkScript(test_combined, (test_input,))
def test_with_no_as(self):
"""
Check that with statements that do not use the 'as' keyword to bind expressions
to targets work as expected.
"""
@torch.jit.script
class Context:
"""
This class implements a basic context manager interface for use in
the unit tests. Unlike Context, the stateful part of this class
is a Tensor that is mutated in-place so that modifications made in the
JIT interpreter are visible outside of it.
"""
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any):
self.count.sub_(0.3)
make_global(Context)
def test_basic(x: torch.Tensor) -> torch.Tensor:
"""Basic test with one with-statement."""
c = Context(1)
with c:
y = x + c.count
y *= c.count
return y
def test_pass(x: torch.Tensor) -> torch.Tensor:
"""
Test with a pass statement inside a with-statement. Although
the body of the with is empty, __enter__ and __exit__ should
still be called.
"""
c = Context(1)
with c:
pass
x *= c.count
return x
def test_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that returning early from inside a with-statement works
as expected.
"""
with c:
y = x + c.count
return y
x = y + y
return x
def test_conditional_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that conditionally returning early from inside a with-statement works
as expected.
"""
with c:
y = x + c.count
if c.count > 0:
return y
x = y + y
return x
def test_break(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that breaking early from inside a with-statement works
as expected.
"""
with c:
for a in l:
if a == 0:
break
x += a * c.count
return x
def test_continue(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that using continue inside a with-statement works
as expected.
"""
with c:
for a in l:
if a == 0:
continue
x += a * c.count
return x
def test_serial(x: torch.Tensor) -> torch.Tensor:
"""
Test two with-statements in a row.
"""
c = Context(1)
with c:
y = x + c.count
with c:
y *= c.count
return y
def test_nested(x: torch.Tensor) -> torch.Tensor:
"""
Test nested with-statements.
"""
c = Context(1)
with c:
with c:
y = x + c.count
y *= c.count
return y
def test_combined(x: torch.Tensor) -> torch.Tensor:
"""
Test a with-statement with multiple with items.
"""
c = Context(1)
d = Context(2)
with c, d:
y = x + (c.count + d.count)
return y
test_input = torch.randn(2, 2)
test_context = Context(2)
test_list = [2, 0, 1, 3, 0, 2]
self.checkScript(test_basic, (test_input,))
self.checkScript(test_pass, (test_input,))
self.checkScript(test_early_return, (test_input, test_context))
self.checkScript(test_break, (test_input, test_context, test_list))
self.checkScript(test_continue, (test_input, test_context, test_list))
self.assertEqual(test_context.count, 2)
self.checkScript(test_serial, (test_input,))
self.checkScript(test_nested, (test_input,))
self.checkScript(test_combined, (test_input,))
def test_with_exceptions(self):
"""
Check that exceptions thrown in the bodies of with-statements are
handled correctly.
"""
@torch.jit.script
class Context:
"""
This class implements a basic context manager interface for use in
the unit tests. Unlike Context, the stateful part of this class
is a Tensor that is mutated in-place so that modifications made in the
JIT interpreter are visible outside of it.
"""
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any):
self.count.sub_(0.3)
make_global(Context)
@torch.jit.script
def method_that_raises() -> torch.Tensor:
raise Exception("raised exception") # noqa: TRY002
@torch.jit.script
def test_exception(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test the case in which an exception is thrown while executing the body of a with-statement.
"""
with c as _:
x += method_that_raises()
return x
@torch.jit.script
def test_exception_nested(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test the case in which an exception is thrown while executing the body of a nested with-statement.
"""
with c as _:
with c as _:
x += method_that_raises()
return x
@torch.jit.script
def with_that_raises(c: Context) -> torch.Tensor:
a = torch.tensor([1])
with c as _:
a += method_that_raises()
return a
@torch.jit.script
def test_exception_fn_call(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test the case in which an exception is thrown while there are active with-statements in two different
frames.
"""
with c as _:
x += with_that_raises(c)
return x
c = Context(1)
# checkScript and checkScriptRaisesRegex cannot be used because the string frontend will
# not compile class types (of which Context, the context manager being used for this test
# is one).
with self.assertRaisesRegexWithHighlight(
Exception, r"raised exception", 'raise Exception("raised exception'
):
test_exception(torch.randn(2), c)
self.assertEqual(c.count, 1)
with self.assertRaisesRegexWithHighlight(
Exception, r"raised exception", 'raise Exception("raised exception'
):
test_exception_nested(torch.randn(2), c)
self.assertEqual(c.count, 1)
with self.assertRaisesRegexWithHighlight(
Exception, r"raised exception", 'raise Exception("raised exception'
):
test_exception_fn_call(torch.randn(2), c)
self.assertEqual(c.count, 1)
def test_with_errors(self):
"""
Check that errors related to with-statements are detected and reported correctly.
"""
@torch.jit.script
class NoEnterNoExit:
"""
This class is missing __enter__ and __exit__ methods.
"""
def __init__(self) -> None:
self.count = 1
@torch.jit.script
class BadEnter:
"""
This class has an __enter__ method with an incorrect signature.
"""
def __init__(self) -> None:
self.count = 1
def __enter__(self, incr: int): # noqa: PLE0302
self.count += incr
def __exit__(self, type: Any, value: Any, tb: Any):
pass
@torch.jit.script
class BadExit:
"""
This class has an __exit__ method with an incorrect signature.
"""
def __init__(self) -> None:
self.count = 1
def __enter__(self):
self.count += 1
def __exit__(self, type: Any, value: Any): # noqa: PLE0302
pass
@torch.jit.script
class ExitIncorrectTypes:
"""
This class has an __exit__ method with unsupported argument types.
"""
def __init__(self) -> None:
self.count = 1
def __enter__(self):
self.count += 1
def __exit__(self, type: Any, value: int, tb: int):
pass
def test_no_enter_no_exit(x: torch.Tensor, cm: NoEnterNoExit) -> torch.Tensor:
with cm as _:
pass
return x
def test_bad_enter(x: torch.Tensor, cm: BadEnter) -> torch.Tensor:
with cm as _:
pass
return x
def test_bad_exit(x: torch.Tensor, cm: BadExit) -> torch.Tensor:
with cm as _:
pass
return x
def test_exit_incorrect_types(
x: torch.Tensor, cm: ExitIncorrectTypes
) -> torch.Tensor:
with cm as _:
pass
return x
def test_enter_without_object():
with "not_object" as obj:
pass
test_tensor = torch.randn(5, dtype=torch.double)
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"does not define __enter__ and __exit__ methods", "cm"
):
self.checkScript(test_no_enter_no_exit, (test_tensor, NoEnterNoExit()))
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"__enter__ must have only one argument and one return value",
"cm",
):
self.checkScript(test_bad_enter, (test_tensor, BadEnter()))
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"__exit__ must have four arguments", "cm"
):
self.checkScript(test_bad_exit, (test_tensor, BadExit()))
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"argument 2 of __exit__ must have Any type", "cm"
):
self.checkScript(
test_exit_incorrect_types, (test_tensor, ExitIncorrectTypes())
)
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"must return an object", '"not_object"'
):
self.checkScript(test_enter_without_object, ())
def test_with_no_grad(self):
"""
Check that torch.no_grad() works. Most of these are adapted from
corresponding tests for eager-mode no_grad.
"""
# Basic no_grad test.
def test_no_grad(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
w = x + y
return w
s = torch.jit.script(test_no_grad)
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
w = s(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
# Test assignment of a grad-less Tensor to a Tensor with gradients
# in a no_grad block.
def test_no_grad_assignment(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x[0] = y
return x
s = torch.jit.script(test_no_grad_assignment)
z = torch.randn(5)
w = s(x, z)
self.assertTrue(w.requires_grad)
self.assertIsNone(w.grad_fn)
# Check that @torch.jit.ignored functions respect no_grad when it is
# called in JIT mode.
class NoGradModule(torch.nn.Module):
@torch.jit.ignore
def adder(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
w = x + y
return w
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
w = self.adder(x, y)
return w
s = torch.jit.script(NoGradModule())
w = s(x, y)
self.assertFalse(w.requires_grad)
@skipIfTorchDynamo("Torchdynamo cannot correctly handle profiler.profile calls")
def test_with_record_function(self):
"""
Check that torch.autograd.profiler.record_function context manager is
torchscriptable.
"""
def with_rf(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.autograd.profiler.record_function("foo"):
# Nested record_function.
with torch.autograd.profiler.record_function("nested"):
a = x + y
return a
scripted = torch.jit.script(with_rf)
x, y = torch.ones(2), torch.ones(2)
with torch.autograd.profiler.profile() as p:
scripted(x, y)
# Need to call below to populate CPU children.
p.key_averages()
function_events = p.function_events
# Event with name "foo" should be recorded.
rf_events = [evt for evt in function_events if evt.name == "foo"]
self.assertEqual(len(rf_events), 1)
rf_event = rf_events[0]
child_events = rf_event.cpu_children
# Ensure we find nested record_function event
self.assertTrue("nested" in (child.name for child in child_events))
nested_function_event = [
evt for evt in function_events if evt.name == "nested"
][0]
# Nested record function should have child "aten::add"
nested_child_events = nested_function_event.cpu_children
self.assertTrue("aten::add" in (child.name for child in nested_child_events))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestWith |
python | django-extensions__django-extensions | django_extensions/management/commands/notes.py | {
"start": 372,
"end": 3000
} | class ____(BaseCommand):
help = "Show all annotations like TODO, FIXME, BUG, HACK, WARNING, NOTE or XXX "
"in your py and HTML files."
label = "annotation tag (TODO, FIXME, BUG, HACK, WARNING, NOTE, XXX)"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--tag", dest="tag", help="Search for specific tags only", action="append"
)
@signalcommand
def handle(self, *args, **options):
# don't add django internal code
apps = [
app.replace(".", "/")
for app in filter(
lambda app: not app.startswith("django.contrib"),
settings.INSTALLED_APPS,
)
]
template_dirs = get_template_setting("DIRS", [])
base_dir = getattr(settings, "BASE_DIR")
if template_dirs:
apps += template_dirs
for app_dir in apps:
if base_dir:
app_dir = os.path.join(base_dir, app_dir)
for top, dirs, files in os.walk(app_dir):
for fn in files:
if os.path.splitext(fn)[1] in (".py", ".html"):
fpath = os.path.join(top, fn)
annotation_lines = []
with open(fpath, "r") as fd:
i = 0
for line in fd.readlines():
i += 1
if ANNOTATION_RE.search(line):
tag, msg = ANNOTATION_RE.findall(line)[0]
if options["tag"]:
if tag not in map(
str.upper, map(str, options["tag"])
):
break
if ANNOTATION_END_RE.search(msg.strip()):
msg = ANNOTATION_END_RE.findall(msg.strip())[0][
0
]
annotation_lines.append(
"[%3s] %-5s %s" % (i, tag, msg.strip())
)
if annotation_lines:
self.stdout.write("%s:" % fpath)
for annotation in annotation_lines:
self.stdout.write(" * %s" % annotation)
self.stdout.write("")
| Command |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs.py | {
"start": 2502,
"end": 2693
} | class ____:
f: F = F()
g: G = G()
# Regression test for https://github.com/astral-sh/ruff/issues/19014
# These are all valid field calls and should not cause diagnostics.
@attr.define
| K |
python | realpython__materials | duck-typing-python/birds_v1.py | {
"start": 128,
"end": 256
} | class ____:
def swim(self):
print("The swan is swimming")
def fly(self):
print("The swan is flying")
| Swan |
python | django-compressor__django-compressor | compressor/storage.py | {
"start": 4862,
"end": 5187
} | class ____(LazyObject):
def _setup(self):
self._wrapped = get_storage(
alias=settings.COMPRESS_OFFLINE_MANIFEST_STORAGE_ALIAS,
storage_class=settings.COMPRESS_OFFLINE_MANIFEST_STORAGE,
)
default_offline_manifest_storage = DefaultOfflineManifestStorage()
| DefaultOfflineManifestStorage |
python | huggingface__transformers | src/transformers/models/gemma3/modeling_gemma3.py | {
"start": 20497,
"end": 22099
} | class ____(PreTrainedModel):
config: Gemma3Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = [
"Gemma3DecoderLayer",
"SiglipVisionEmbeddings",
"SiglipEncoderLayer",
"SiglipMultiheadAttentionPoolingHead",
]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Gemma3DecoderLayer,
"attentions": Gemma3Attention,
}
input_modalities = ("image", "text")
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Gemma3MultiModalProjector):
init.zeros_(module.mm_input_projection_weight)
# We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
elif "RMSNorm" in module.__class__.__name__:
init.zeros_(module.weight)
def _bidirectional_window_overlay(sliding_window: int) -> Callable[[int, int, int, int], bool]:
"""
Enables a bidirectional mask within the sliding window.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
"""A token can attend to any other token if their absolute distance is within
the (exclusive) sliding window size (distance < sliding_window)."""
return abs(q_idx - kv_idx) < sliding_window
return inner_mask
@auto_docstring
| Gemma3PreTrainedModel |
python | kamyu104__LeetCode-Solutions | Python/wiggle-subsequence.py | {
"start": 29,
"end": 563
} | class ____(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return len(nums)
length, up = 1, None
for i in xrange(1, len(nums)):
if nums[i - 1] < nums[i] and (up is None or up is False):
length += 1
up = True
elif nums[i - 1] > nums[i] and (up is None or up is True):
length += 1
up = False
return length
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI032.py | {
"start": 177,
"end": 283
} | class ____:
def __eq__(self, other: object) -> bool: ...
def __ne__(self, obj: object) -> int: ...
| Good |
python | walkccc__LeetCode | solutions/2969. Minimum Number of Coins for Fruits II/2969.py | {
"start": 0,
"end": 478
} | class ____:
# Same as 2944. Minimum Number of Coins for Fruits
def minimumCoins(self, prices: list[int]) -> int:
n = len(prices)
# Convert to 0-indexed for easy computation.
# dp[i] := the minimum number of coins to acquire fruits[i:]
dp = [math.inf] * n + [0]
for i in range(n - 1, -1, -1):
# Convert back to 1-indexed.
for j in range(i + 1, min((i + 1) * 2 + 1, n + 1)):
dp[i] = min(dp[i], prices[i] + dp[j])
return dp[0]
| Solution |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 30808,
"end": 31173
} | class ____(serializers.ModelSerializer):
"""Serializer to render a subproject (``ProjectRelationship``)."""
child = ChildProjectSerializer()
_links = SubprojectLinksSerializer(source="*")
class Meta:
model = ProjectRelationship
fields = [
"child",
"alias",
"_links",
]
| SubprojectSerializer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_macro03.py | {
"start": 315,
"end": 871
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("macro03.xlsm")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet("Foo")
workbook.add_vba_project(self.vba_dir + "vbaProject04.bin")
worksheet.write("A1", 123)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | openai__openai-python | src/openai/resources/fine_tuning/fine_tuning.py | {
"start": 854,
"end": 1951
} | class ____(SyncAPIResource):
@cached_property
def jobs(self) -> Jobs:
return Jobs(self._client)
@cached_property
def checkpoints(self) -> Checkpoints:
return Checkpoints(self._client)
@cached_property
def alpha(self) -> Alpha:
return Alpha(self._client)
@cached_property
def with_raw_response(self) -> FineTuningWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return FineTuningWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> FineTuningWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return FineTuningWithStreamingResponse(self)
| FineTuning |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance7.py | {
"start": 255,
"end": 502
} | class ____:
element_list: list["X"]
def return_iter(
self, cls: type[T1] | tuple[type[T1], type[T2]]
) -> Iterator[T1 | T2]:
for item in self.element_list:
if isinstance(item, cls):
yield item
| X |
python | nmslib__hnswlib | tests/python/bindings_test_pickle.py | {
"start": 5544,
"end": 6494
} | class ____(unittest.TestCase):
def setUp(self):
self.ef_construction = 200
self.M = 32
self.ef = 400
self.num_elements = 1000
self.num_test_elements = 100
self.num_threads = 4
self.k = 25
self.label_err_thresh = 5 # max number of missing labels allowed per test item
self.item_err_thresh = 5 # max number of items allowed with incorrect labels
self.dists_err_thresh = 50 # for two matrices, d1 and d2, dists_err_thresh controls max
# number of value pairs that are allowed to be different in d1 and d2
# i.e., number of values that are (d1-d2)**2>1e-3
def test_inner_product_space(self):
test_space_main(self, 'ip', 16)
def test_l2_space(self):
test_space_main(self, 'l2', 53)
def test_cosine_space(self):
test_space_main(self, 'cosine', 32)
| PickleUnitTests |
python | davidhalter__parso | parso/normalizer.py | {
"start": 5153,
"end": 5597
} | class ____(Normalizer):
def __init__(self, node_to_str_map):
self._node_to_str_map = node_to_str_map
def visit(self, node):
try:
return self._node_to_str_map[node]
except KeyError:
return super().visit(node)
def visit_leaf(self, leaf):
try:
return self._node_to_str_map[leaf]
except KeyError:
return super().visit_leaf(leaf)
| RefactoringNormalizer |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 14062,
"end": 14798
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_dlp_job(self, mock_hook):
mock_hook.return_value.get_dlp_job.return_value = DlpJob()
operator = CloudDLPGetDLPJobOperator(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID, task_id="id")
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_dlp_job.assert_called_once_with(
dlp_job_id=DLP_JOB_ID,
project_id=PROJECT_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPGetDlpJobOperator |
python | huggingface__transformers | src/transformers/models/align/modeling_align.py | {
"start": 19253,
"end": 22832
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| AlignTextEmbeddings |
python | ethereum__web3.py | web3/providers/eth_tester/main.py | {
"start": 2807,
"end": 7759
} | class ____(BaseProvider):
_current_request_id = 0
_middleware = (
default_transaction_fields_middleware,
ethereum_tester_middleware,
)
ethereum_tester = None
api_endpoints: dict[str, dict[str, Callable[..., RPCResponse]]] | None = None
def __init__(
self,
ethereum_tester: Union["EthereumTester", "BaseChainBackend"] | None = None,
api_endpoints: None | (dict[str, dict[str, Callable[..., RPCResponse]]]) = None,
) -> None:
# do not import eth_tester until runtime, it is not a default dependency
super().__init__()
from eth_tester import EthereumTester # noqa: F811
from eth_tester.backends.base import (
BaseChainBackend,
)
if ethereum_tester is None:
self.ethereum_tester = EthereumTester()
elif isinstance(ethereum_tester, EthereumTester):
self.ethereum_tester = ethereum_tester
elif isinstance(ethereum_tester, BaseChainBackend):
self.ethereum_tester = EthereumTester(ethereum_tester)
else:
raise Web3TypeError(
"Expected ethereum_tester to be of type `eth_tester.EthereumTester` or "
"a subclass of `eth_tester.backends.base.BaseChainBackend`, "
f"instead received {type(ethereum_tester)}. "
"If you would like a custom eth-tester instance to test with, see the "
"eth-tester documentation. https://github.com/ethereum/eth-tester."
)
if api_endpoints is None:
# do not import eth_tester derivatives until runtime,
# it is not a default dependency
from .defaults import (
API_ENDPOINTS,
)
self.api_endpoints = API_ENDPOINTS
else:
self.api_endpoints = api_endpoints
def request_func(
self, w3: "Web3", middleware_onion: "MiddlewareOnion"
) -> Callable[..., RPCResponse]:
# override the request_func to add the ethereum_tester_middleware
middleware = middleware_onion.as_tuple_of_middleware() + tuple(self._middleware)
cache_key = self._request_func_cache[0]
if cache_key != middleware:
self._request_func_cache = (
middleware,
combine_middleware(
middleware=middleware,
w3=w3,
provider_request_fn=self.make_request,
),
)
return self._request_func_cache[-1]
def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
response = _make_request(
method,
params,
self.api_endpoints,
self.ethereum_tester,
repr(self._current_request_id),
)
self._current_request_id += 1
return response
def is_connected(self, show_traceback: bool = False) -> Literal[True]:
return True
def _make_response(result: Any, response_id: str, message: str = "") -> RPCResponse:
if isinstance(result, Exception):
return cast(
RPCResponse,
{
"id": response_id,
"jsonrpc": "2.0",
"error": cast(RPCError, {"code": -32601, "message": message}),
},
)
return cast(RPCResponse, {"id": response_id, "jsonrpc": "2.0", "result": result})
def _make_request(
method: RPCEndpoint,
params: Any,
api_endpoints: dict[str, dict[str, Any]],
ethereum_tester_instance: "EthereumTester",
request_id: str,
) -> RPCResponse:
# do not import eth_tester derivatives until runtime,
# it is not a default dependency
from eth_tester.exceptions import (
TransactionFailed,
)
namespace, _, endpoint = method.partition("_")
try:
delegator = api_endpoints[namespace][endpoint]
except KeyError as e:
return _make_response(e, request_id, message=f"Unknown RPC Endpoint: {method}")
try:
response = delegator(ethereum_tester_instance, params)
except NotImplementedError as e:
return _make_response(
e,
request_id,
message=f"RPC Endpoint has not been implemented: {method}",
)
except TransactionFailed as e:
first_arg = e.args[0]
try:
# sometimes eth-tester wraps an exception in another exception
raw_error_msg = (
first_arg if not isinstance(first_arg, Exception) else first_arg.args[0]
)
reason = (
abi.decode(["string"], raw_error_msg[4:])[0]
if is_bytes(raw_error_msg)
else raw_error_msg
)
except DecodingError:
reason = first_arg
raise TransactionFailed(f"execution reverted: {reason}")
else:
return _make_response(response, request_id)
| EthereumTesterProvider |
python | ray-project__ray | python/ray/data/preprocessors/utils.py | {
"start": 713,
"end": 1124
} | class ____:
"""Encapsulates a statistical computation with optional post-processing."""
def __init__(
self,
*,
stat_fn: Union[AggregateFnV2, Callable],
post_process_fn: Callable = lambda x: x,
post_key_fn: Callable[[str], str],
):
self.stat_fn = stat_fn
self.post_process_fn = post_process_fn
self.post_key_fn = post_key_fn
| BaseStatSpec |
python | apache__airflow | providers/yandex/src/airflow/providers/yandex/operators/yq.py | {
"start": 1183,
"end": 3279
} | class ____(BaseOperator):
"""
Executes sql code using Yandex Query service.
:param sql: the SQL code to be executed as a single string
:param name: name of the query in YandexQuery
:param folder_id: cloud folder id where to create query
:param yandex_conn_id: Airflow connection ID to get parameters from
"""
operator_extra_links = (YQLink(),)
template_fields: Sequence[str] = ("sql",)
template_fields_renderers = {"sql": "sql"}
template_ext: Sequence[str] = (".sql",)
ui_color = "#ededed"
def __init__(
self,
*,
name: str | None = None,
folder_id: str | None = None,
yandex_conn_id: str | None = None,
public_ssh_key: str | None = None,
service_account_id: str | None = None,
sql: str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.folder_id = folder_id
self.yandex_conn_id = yandex_conn_id
self.public_ssh_key = public_ssh_key
self.service_account_id = service_account_id
self.sql = sql
self.query_id: str | None = None
@cached_property
def hook(self) -> YQHook:
"""Get valid hook."""
return YQHook(
yandex_conn_id=self.yandex_conn_id,
default_folder_id=self.folder_id,
default_public_ssh_key=self.public_ssh_key,
default_service_account_id=self.service_account_id,
)
def execute(self, context: Context) -> Any:
self.query_id = self.hook.create_query(query_text=self.sql, name=self.name)
# pass to YQLink
web_link = self.hook.compose_query_web_link(self.query_id)
YQLink.persist(context, web_link)
results = self.hook.wait_results(self.query_id)
# forget query to avoid 'stop_query' in on_kill
self.query_id = None
return results
def on_kill(self) -> None:
if self.hook is not None and self.query_id is not None:
self.hook.stop_query(self.query_id)
self.hook.close()
| YQExecuteQueryOperator |
python | pytorch__pytorch | torch/_dynamo/variables/iter.py | {
"start": 1397,
"end": 8866
} | class ____(VariableTracker):
def __init__(self, value: Any, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.value = value
def __repr__(self) -> str:
return f"ItertoolsVariable({self.value})"
def as_python_constant(self) -> Any:
return self.value
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence["VariableTracker"],
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
# See also: module `torch._dynamo.polyfills.itertools`
if self.value is itertools.product:
if any(kw != "repeat" for kw in kwargs):
unimplemented(
gb_type="Unsupported kwargs for itertools.product",
context=f"call_function {self} {args} {kwargs}",
explanation=f"Expected kwargs: 'repeat', but got "
f"{','.join(set(kwargs.keys()) - {'repeat'})}",
hints=[*graph_break_hints.USER_ERROR],
)
if "repeat" in kwargs:
r = kwargs["repeat"].as_python_constant()
else:
r = 1
seqs = [arg.force_unpack_var_sequence(tx) for arg in args]
items = [
variables.TupleVariable(list(item))
for item in itertools.product(*seqs, repeat=r)
]
return variables.ListIteratorVariable(
items, # type: ignore[arg-type]
mutation_type=ValueMutationNew(),
)
elif (
self.value is itertools.combinations
and not kwargs
and len(args) == 2
and args[0].has_unpack_var_sequence(tx)
and args[1].is_python_constant()
):
iterable = args[0].unpack_var_sequence(tx)
r = args[1].as_python_constant()
items = []
for item in itertools.combinations(iterable, r):
items.append(variables.TupleVariable(list(item)))
return variables.ListIteratorVariable(
items, # type: ignore[arg-type]
mutation_type=ValueMutationNew(),
)
elif self.value is itertools.groupby:
if any(kw != "key" for kw in kwargs):
unimplemented(
gb_type="Unsupported kwargs for itertools.groupby",
context=f"call_function {self} {args} {kwargs}",
explanation=f"Expected kwargs: 'key', but got "
f"{','.join(set(kwargs.keys()) - {'key'})}",
hints=[*graph_break_hints.USER_ERROR],
)
def retrieve_const_key(key: VariableTracker) -> Any:
if isinstance(key, variables.SymNodeVariable):
return key.evaluate_expr()
elif isinstance(key, variables.ConstantVariable):
return key.as_python_constant()
else:
unimplemented(
gb_type="Unsupported key type for itertools.groupby",
context=f"call_function {self} {args} {kwargs}",
explanation="Dynamo does not know how to trace "
f"itertools.groupby with key type: {str(type(key))}. "
"We only support grouping keys that are constants (int, float, str, etc.)",
hints=[*graph_break_hints.SUPPORTABLE],
)
if len(args) == 1 and args[0].has_unpack_var_sequence(tx):
seq = args[0].unpack_var_sequence(tx)
else:
unimplemented(
gb_type="Unsupported arguments for itertools.groupby",
context=f"call_function {self} {args} {kwargs}",
explanation="Dynamo does not know how to trace "
f"itertools.groupby with args: {args} and kwargs: {kwargs}. "
"itertools.groupby expects an iterable to group and an "
"optional key function to determine groupings.",
hints=[
"Make sure the arguments to itertools.groupby are correct.",
*graph_break_hints.SUPPORTABLE,
],
)
if "key" in kwargs:
def keyfunc(x: VariableTracker) -> Any:
return retrieve_const_key(
kwargs.get("key").call_function(tx, [x], {}) # type: ignore[union-attr]
)
else:
def keyfunc(x: VariableTracker) -> Any:
return retrieve_const_key(x)
result = []
try:
# pyrefly: ignore [unbound-name]
for k, v in itertools.groupby(seq, key=keyfunc):
result.append(
variables.TupleVariable(
[
(
variables.ConstantVariable.create(k)
if variables.ConstantVariable.is_literal(k)
else k
),
variables.ListIteratorVariable(
list(v), mutation_type=ValueMutationNew()
),
],
mutation_type=ValueMutationNew(),
)
)
except Exception as e:
unimplemented(
gb_type="Unexpected failure during itertools.groupby() iteration",
context=f"call_function {self} {args} {kwargs}",
explanation="Unexpected failure in invoking function during groupby",
hints=[*graph_break_hints.SUPPORTABLE],
from_exc=e,
)
return variables.ListIteratorVariable(
result, # type: ignore[arg-type]
mutation_type=ValueMutationNew(),
)
elif self.value is itertools.repeat:
if len(args) < 2:
return variables.RepeatIteratorVariable(
*args, mutation_type=ValueMutationNew()
)
return tx.inline_user_function_return(
VariableTracker.build(tx, polyfills.repeat), args, kwargs
)
elif self.value is itertools.count:
return variables.CountIteratorVariable(
*args, mutation_type=ValueMutationNew()
)
elif (
self.value is itertools.permutations
and (len(args) == 1 or (len(args) == 2 and args[1].is_python_constant()))
and not kwargs
):
if len(args) == 2:
r = args[1].as_python_constant()
else:
r = None
items = [
variables.TupleVariable(list(item))
for item in itertools.permutations(
args[0].force_unpack_var_sequence(tx), r
)
]
return variables.ListIteratorVariable(
items, # type: ignore[arg-type]
mutation_type=ValueMutationNew(),
)
else:
return super().call_function(tx, args, kwargs)
| ItertoolsVariable |
python | doocs__leetcode | solution/2500-2599/2578.Split With Minimum Sum/Solution2.py | {
"start": 0,
"end": 146
} | class ____:
def splitNum(self, num: int) -> int:
s = sorted(str(num))
return int(''.join(s[::2])) + int(''.join(s[1::2]))
| Solution |
python | catalyst-team__catalyst | catalyst/contrib/losses/gan.py | {
"start": 51,
"end": 413
} | class ____(nn.Module):
"""
Criterion to compute simple mean of the output, completely ignoring target
(maybe useful e.g. for WGAN real/fake validity averaging.
"""
def forward(self, output, target):
"""Compute criterion.
@TODO: Docs (add typing). Contribution is welcome.
"""
return output.mean()
| MeanOutputLoss |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_resource_slice_spec.py | {
"start": 383,
"end": 12803
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'all_nodes': 'bool',
'devices': 'list[V1beta2Device]',
'driver': 'str',
'node_name': 'str',
'node_selector': 'V1NodeSelector',
'per_device_node_selection': 'bool',
'pool': 'V1beta2ResourcePool',
'shared_counters': 'list[V1beta2CounterSet]'
}
attribute_map = {
'all_nodes': 'allNodes',
'devices': 'devices',
'driver': 'driver',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'per_device_node_selection': 'perDeviceNodeSelection',
'pool': 'pool',
'shared_counters': 'sharedCounters'
}
def __init__(self, all_nodes=None, devices=None, driver=None, node_name=None, node_selector=None, per_device_node_selection=None, pool=None, shared_counters=None, local_vars_configuration=None): # noqa: E501
"""V1beta2ResourceSliceSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._all_nodes = None
self._devices = None
self._driver = None
self._node_name = None
self._node_selector = None
self._per_device_node_selection = None
self._pool = None
self._shared_counters = None
self.discriminator = None
if all_nodes is not None:
self.all_nodes = all_nodes
if devices is not None:
self.devices = devices
self.driver = driver
if node_name is not None:
self.node_name = node_name
if node_selector is not None:
self.node_selector = node_selector
if per_device_node_selection is not None:
self.per_device_node_selection = per_device_node_selection
self.pool = pool
if shared_counters is not None:
self.shared_counters = shared_counters
@property
def all_nodes(self):
"""Gets the all_nodes of this V1beta2ResourceSliceSpec. # noqa: E501
AllNodes indicates that all nodes have access to the resources in the pool. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501
:return: The all_nodes of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: bool
"""
return self._all_nodes
@all_nodes.setter
def all_nodes(self, all_nodes):
"""Sets the all_nodes of this V1beta2ResourceSliceSpec.
AllNodes indicates that all nodes have access to the resources in the pool. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501
:param all_nodes: The all_nodes of this V1beta2ResourceSliceSpec. # noqa: E501
:type: bool
"""
self._all_nodes = all_nodes
@property
def devices(self):
"""Gets the devices of this V1beta2ResourceSliceSpec. # noqa: E501
Devices lists some or all of the devices in this pool. Must not have more than 128 entries. # noqa: E501
:return: The devices of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: list[V1beta2Device]
"""
return self._devices
@devices.setter
def devices(self, devices):
"""Sets the devices of this V1beta2ResourceSliceSpec.
Devices lists some or all of the devices in this pool. Must not have more than 128 entries. # noqa: E501
:param devices: The devices of this V1beta2ResourceSliceSpec. # noqa: E501
:type: list[V1beta2Device]
"""
self._devices = devices
@property
def driver(self):
"""Gets the driver of this V1beta2ResourceSliceSpec. # noqa: E501
Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable. # noqa: E501
:return: The driver of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1beta2ResourceSliceSpec.
Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable. # noqa: E501
:param driver: The driver of this V1beta2ResourceSliceSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def node_name(self):
"""Gets the node_name of this V1beta2ResourceSliceSpec. # noqa: E501
NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node. This field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable. # noqa: E501
:return: The node_name of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this V1beta2ResourceSliceSpec.
NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node. This field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable. # noqa: E501
:param node_name: The node_name of this V1beta2ResourceSliceSpec. # noqa: E501
:type: str
"""
self._node_name = node_name
@property
def node_selector(self):
"""Gets the node_selector of this V1beta2ResourceSliceSpec. # noqa: E501
:return: The node_selector of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: V1NodeSelector
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""Sets the node_selector of this V1beta2ResourceSliceSpec.
:param node_selector: The node_selector of this V1beta2ResourceSliceSpec. # noqa: E501
:type: V1NodeSelector
"""
self._node_selector = node_selector
@property
def per_device_node_selection(self):
"""Gets the per_device_node_selection of this V1beta2ResourceSliceSpec. # noqa: E501
PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501
:return: The per_device_node_selection of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: bool
"""
return self._per_device_node_selection
@per_device_node_selection.setter
def per_device_node_selection(self, per_device_node_selection):
"""Sets the per_device_node_selection of this V1beta2ResourceSliceSpec.
PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501
:param per_device_node_selection: The per_device_node_selection of this V1beta2ResourceSliceSpec. # noqa: E501
:type: bool
"""
self._per_device_node_selection = per_device_node_selection
@property
def pool(self):
"""Gets the pool of this V1beta2ResourceSliceSpec. # noqa: E501
:return: The pool of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: V1beta2ResourcePool
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1beta2ResourceSliceSpec.
:param pool: The pool of this V1beta2ResourceSliceSpec. # noqa: E501
:type: V1beta2ResourcePool
"""
if self.local_vars_configuration.client_side_validation and pool is None: # noqa: E501
raise ValueError("Invalid value for `pool`, must not be `None`") # noqa: E501
self._pool = pool
@property
def shared_counters(self):
"""Gets the shared_counters of this V1beta2ResourceSliceSpec. # noqa: E501
SharedCounters defines a list of counter sets, each of which has a name and a list of counters available. The names of the SharedCounters must be unique in the ResourceSlice. The maximum number of counters in all sets is 32. # noqa: E501
:return: The shared_counters of this V1beta2ResourceSliceSpec. # noqa: E501
:rtype: list[V1beta2CounterSet]
"""
return self._shared_counters
@shared_counters.setter
def shared_counters(self, shared_counters):
"""Sets the shared_counters of this V1beta2ResourceSliceSpec.
SharedCounters defines a list of counter sets, each of which has a name and a list of counters available. The names of the SharedCounters must be unique in the ResourceSlice. The maximum number of counters in all sets is 32. # noqa: E501
:param shared_counters: The shared_counters of this V1beta2ResourceSliceSpec. # noqa: E501
:type: list[V1beta2CounterSet]
"""
self._shared_counters = shared_counters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ResourceSliceSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2ResourceSliceSpec):
return True
return self.to_dict() != other.to_dict()
| V1beta2ResourceSliceSpec |
python | huggingface__transformers | src/transformers/models/sam2_video/modular_sam2_video.py | {
"start": 43417,
"end": 43493
} | class ____(Sam2SinePositionEmbedding):
pass
| Sam2VideoPositionEmbeddingSine |
python | tensorflow__tensorflow | tensorflow/python/util/variable_utils_test.py | {
"start": 1769,
"end": 4288
} | class ____(test.TestCase):
def test_convert_variables_to_tensors(self):
ct = CT()
data = [resource_variable_ops.ResourceVariable(1),
resource_variable_ops.ResourceVariable(2),
constant_op.constant(3),
[4],
5,
ct]
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
results = variable_utils.convert_variables_to_tensors(data)
expected_results = [1, 2, 3, [4], 5, ct]
# Only ResourceVariables are converted to Tensors.
self.assertIsInstance(results[0], tensor.Tensor)
self.assertIsInstance(results[1], tensor.Tensor)
self.assertIsInstance(results[2], tensor.Tensor)
self.assertIsInstance(results[3], list)
self.assertIsInstance(results[4], int)
self.assertIs(results[5], ct)
results[:3] = self.evaluate(results[:3])
self.assertAllEqual(results, expected_results)
def test_convert_variables_in_composite_tensor(self):
ct2 = CT2(resource_variable_ops.ResourceVariable(1))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertIsInstance(ct2.component,
resource_variable_ops.ResourceVariable)
result = variable_utils.convert_variables_to_tensors(ct2)
self.assertIsInstance(result.component, tensor.Tensor)
self.assertAllEqual(result.component, 1)
def test_replace_variables_with_atoms(self):
data = [resource_variable_ops.ResourceVariable(1),
resource_variable_ops.ResourceVariable(2),
constant_op.constant(3),
[4],
5]
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
results = variable_utils.replace_variables_with_atoms(data)
expected_results = [0, 0, 3, [4], 5]
# Only ResourceVariables are replaced with int 0s.
self.assertIsInstance(results[0], int)
self.assertIsInstance(results[1], int)
self.assertIsInstance(results[2], tensor.Tensor)
self.assertIsInstance(results[3], list)
self.assertIsInstance(results[4], int)
results[2] = self.evaluate(results[2])
self.assertAllEqual(results, expected_results)
# Make sure 0 is a tf.nest atom with expand_composites=True.
flat_results = nest.flatten(results, expand_composites=True)
expected_flat_results = [0, 0, 3, 4, 5]
self.assertAllEqual(flat_results, expected_flat_results)
if __name__ == "__main__":
test.main()
| VariableUtilsTest |
python | getsentry__sentry | src/sentry/api/serializers/models/groupsearchviewstarred.py | {
"start": 664,
"end": 1238
} | class ____(Serializer):
def __init__(self, *args, **kwargs):
self.organization = kwargs.pop("organization", None)
super().__init__(*args, **kwargs)
def serialize(self, obj, attrs, user, **kwargs) -> GroupSearchViewStarredSerializerResponse:
serialized_view: GroupSearchViewSerializerResponse = serialize(
obj.group_search_view,
user,
serializer=GroupSearchViewSerializer(
organization=self.organization,
),
)
return serialized_view
| GroupSearchViewStarredSerializer |
python | pypa__warehouse | tests/unit/admin/views/test_macaroons.py | {
"start": 2621,
"end": 3338
} | class ____:
def test_no_macaroon_raises_404(self, db_request):
db_request.matchdict["macaroon_id"] = uuid.uuid4()
with pytest.raises(views.HTTPNotFound):
views.macaroon_detail(db_request)
def test_macaroon_exists(self, db_request, macaroon_service):
user = UserFactory.create()
_, macaroon = macaroon_service.create_macaroon(
location="test",
description="test",
scopes=[caveats.RequestUser(user_id=str(user.id))],
user_id=user.id,
)
db_request.matchdict["macaroon_id"] = macaroon.id
result = views.macaroon_detail(db_request)
assert result["macaroon"] == macaroon
| TestMacaroonDetail |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 4024,
"end": 4195
} | class ____(Proto_ContraSelf):
# This should generate a reportIncompatibleMethodOverride error.
def m(self, x: Impl_ContraSelf) -> None: ...
| Impl_ContraOtherExplicit2 |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 14816,
"end": 14890
} | class ____:
nodes: Annotated[list[ExternKernelNode], 10]
| ExternKernelNodes |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 91545,
"end": 92541
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters(
(3, 4, None),
([[1, 2], [3, 4]], 2, None),
([[1, 2], [3, 4]], [1, 2], 0),
([[1, 2], [3, 4]], [1, 2], 1),
([[1, 2], [3, 4]], 3, 1),
([[1, 2], [3, 4]], [1, 2, 3, 4], None),
(np.ones([0, 4]), 0, 1),
(np.ones([1, 2]), [2], None),
)
@test_util.with_forward_compatibility_horizons(None, [2052, 2, 7])
def testRepeat(self, array, repeats, axis):
array = np.array(array)
@def_function.function(
input_signature=[tensor_lib.TensorSpec(None, dtypes.int32)] * 2)
def repeat_fn(array, repeats):
return array_ops.repeat(array, repeats, axis)
v_tf = array_ops.repeat(constant_op.constant(array), repeats, axis)
v_tf_fn = repeat_fn(
constant_op.constant(array, dtype=dtypes.int32), repeats)
v_np = np.repeat(array, repeats, axis)
self.assertAllEqual(v_tf, v_np)
self.assertAllEqual(v_tf_fn, v_np)
| RepeatTest |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/cvs_test/package.py | {
"start": 217,
"end": 394
} | class ____(Package):
"""Mock package that uses cvs for fetching."""
homepage = "http://www.cvs-fetch-example.com"
version("cvs", cvs="to-be-filled-in-by-test")
| CvsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.