language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | airflow-core/hatch_build.py | {
"start": 1298,
"end": 5444
} | class ____(BuilderInterface[BuilderConfig, PluginManager]):
"""Custom build class for Airflow assets and git version."""
# Note that this name of the plugin MUST be `custom` - as long as we use it from custom
# hatch_build.py file and not from external plugin. See note in the:
# https://hatch.pypa.io/latest/plugins/build-hook/custom/#example
PLUGIN_NAME = "custom"
@staticmethod
def clean_dir(path: Path) -> None:
log.warning("Cleaning directory: %s", path)
shutil.rmtree(path, ignore_errors=True)
def clean(self, directory: str, versions: Iterable[str]) -> None:
work_dir = Path(self.root)
log.warning("Cleaning generated files in directory: %s", work_dir)
airflow_package_src = work_dir / "src" / "airflow"
airflow_ui_path = airflow_package_src / "ui"
fastapi_ui_path = airflow_package_src / "api_fastapi" / "auth" / "managers" / "simple" / "ui"
self.clean_dir(airflow_ui_path / "dist")
self.clean_dir(airflow_ui_path / "node_modules")
self.clean_dir(fastapi_ui_path / "dist")
self.clean_dir(fastapi_ui_path / "node_modules")
def get_version_api(self) -> dict[str, Callable[..., str]]:
"""Get custom build target for standard package preparation."""
return {"standard": self.build_standard}
def build_standard(self, directory: str, artifacts: Any, **build_data: Any) -> str:
self.write_git_version()
# run this in the parent directory of the airflow-core (i.e. airflow repo root)
work_dir = Path(self.root).parent.resolve()
cmd = ["prek", "run", "--hook-stage", "manual", "compile-ui-assets", "--all-files"]
log.warning("Running command: %s", " ".join(cmd))
run(cmd, cwd=work_dir.as_posix(), check=True)
dist_path = Path(self.root) / "src" / "airflow" / "ui" / "dist"
return dist_path.resolve().as_posix()
def get_git_version(self) -> str:
"""
Return a version to identify the state of the underlying git repo.
The version will indicate whether the head of the current git-backed working directory
is tied to a release tag or not. It will indicate the former with a 'release:{version}'
prefix and the latter with a '.dev0' suffix. Following the prefix will be a sha of the
current branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
Example pre-release version: ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
Example release version: ".release+2f635dc265e78db6708f59f68e8009abb92c1e65".
Example modified release version: ".release+2f635dc265e78db6708f59f68e8009abb92c1e65".dirty
:return: Found Airflow version in Git repo.
"""
try:
import git
try:
git_path = Path(self.root).parent.resolve() / ".git"
log.warning("Getting git version from: %s", git_path)
# Get git version from the git of the airflow root repo
repo = git.Repo(str(git_path))
except git.NoSuchPathError:
log.warning(".git directory not found: Cannot compute the git version")
return ""
except git.InvalidGitRepositoryError:
log.warning("Invalid .git directory not found: Cannot compute the git version")
return ""
except ImportError:
log.warning("gitpython not found: Cannot compute the git version.")
return ""
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return f".dev0+{sha}.dirty"
# commit is clean
return f".release:{sha}"
return "no_git_version"
def write_git_version(self) -> None:
"""Write git version to git_version file."""
version = self.get_git_version()
git_version_file = Path(self.root) / "src" / "airflow" / "git_version"
self.app.display(f"Writing version {version} to {git_version_file}")
git_version_file.write_text(version)
| CustomBuild |
python | allegroai__clearml | clearml/storage/helper.py | {
"start": 14822,
"end": 17527
} | class ____(object):
encoding = None
mode = "rw"
name = ""
newlines = "\n"
softspace = False
def __init__(self, input_iterator: Optional[Iterator[Any]] = None) -> None:
self.closed = False
self._buffer = Queue()
self._input_iterator = input_iterator
self._leftover = None
def __iter__(self) -> Any:
return self
def __next__(self) -> Any:
return self.next()
def close(self) -> None:
self.closed = True
def flush(self) -> None:
pass
def fileno(self) -> int:
return 87
def isatty(self) -> bool:
return False
def next(self) -> bytes:
while not self.closed or not self._buffer.empty():
# input stream
if self._input_iterator:
try:
chunck = next(self._input_iterator)
# make sure we always return bytes
if isinstance(chunck, str):
chunck = chunck.encode("utf-8")
return chunck
except StopIteration:
self.closed = True
raise StopIteration()
except Exception as ex:
_Driver.get_logger().error("Failed downloading: %s" % ex)
else:
# in/out stream
try:
return self._buffer.get(block=True, timeout=1.0)
except Empty:
pass
raise StopIteration()
def read(self, size: Optional[int] = None) -> bytes:
try:
data = self.next() if self._leftover is None else self._leftover
except StopIteration:
return b""
self._leftover = None
try:
while size is None or not data or len(data) < size:
chunk = self.next()
if chunk is not None:
if data is not None:
data += chunk
else:
data = chunk
except StopIteration:
pass
if size is not None and data and len(data) > size:
self._leftover = data[size:]
return data[:size]
return data
def readline(self, size: Optional[int] = None) -> str:
return self.read(size)
def readlines(self, sizehint: Optional[int] = None) -> List[str]:
pass
def truncate(self, size: Optional[int] = None) -> None:
pass
def write(self, bytes: bytes) -> None:
self._buffer.put(bytes, block=True)
def writelines(self, sequence: Iterable[str]) -> None:
for s in sequence:
self.write(s)
| _Stream |
python | pytorch__pytorch | test/test_determination.py | {
"start": 169,
"end": 4328
} | class ____(TestCase):
# Test determination on a subset of tests
TESTS = [
"test_nn",
"test_jit_profiling",
"test_jit",
"test_torch",
"test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja",
"test_utils",
"test_determination",
"test_quantization",
]
@classmethod
def determined_tests(cls, changed_files):
changed_files = [os.path.normpath(path) for path in changed_files]
return [
test
for test in cls.TESTS
if run_test.should_run_test(
run_test.TARGET_DET_LIST, test, changed_files, DummyOptions()
)
]
def test_target_det_list_is_sorted(self):
# We keep TARGET_DET_LIST sorted to minimize merge conflicts
# but most importantly to allow us to comment on the absence
# of a test. It would be very difficult to add a file right
# next to a comment that says to keep it out of the list.
self.assertListEqual(run_test.TARGET_DET_LIST, sorted(run_test.TARGET_DET_LIST))
def test_config_change_only(self):
"""CI configs trigger all tests"""
self.assertEqual(self.determined_tests([".ci/pytorch/test.sh"]), self.TESTS)
def test_run_test(self):
"""run_test.py is imported by determination tests"""
self.assertEqual(
self.determined_tests(["test/run_test.py"]), ["test_determination"]
)
def test_non_code_change(self):
"""Non-code changes don't trigger any tests"""
self.assertEqual(
self.determined_tests(["CODEOWNERS", "README.md", "docs/doc.md"]), []
)
def test_cpp_file(self):
"""CPP files trigger all tests"""
self.assertEqual(
self.determined_tests(["aten/src/ATen/native/cpu/Activation.cpp"]),
self.TESTS,
)
def test_test_file(self):
"""Test files trigger themselves and dependent tests"""
self.assertEqual(
self.determined_tests(["test/test_jit.py"]),
["test_jit_profiling", "test_jit"],
)
self.assertEqual(
self.determined_tests(["test/jit/test_custom_operators.py"]),
["test_jit_profiling", "test_jit"],
)
self.assertEqual(
self.determined_tests(
["test/quantization/eager/test_quantize_eager_ptq.py"]
),
["test_quantization"],
)
def test_test_internal_file(self):
"""testing/_internal files trigger dependent tests"""
self.assertEqual(
self.determined_tests(["torch/testing/_internal/common_quantization.py"]),
[
"test_jit_profiling",
"test_jit",
"test_quantization",
],
)
def test_torch_file(self):
"""Torch files trigger dependent tests"""
self.assertEqual(
# Many files are force-imported to all tests,
# due to the layout of the project.
self.determined_tests(["torch/onnx/utils.py"]),
self.TESTS,
)
self.assertEqual(
self.determined_tests(
[
"torch/autograd/_functions/utils.py",
"torch/autograd/_functions/utils.pyi",
]
),
["test_utils"],
)
self.assertEqual(
self.determined_tests(["torch/utils/cpp_extension.py"]),
[
"test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja",
"test_utils",
"test_determination",
],
)
def test_new_folder(self):
"""New top-level Python folder triggers all tests"""
self.assertEqual(self.determined_tests(["new_module/file.py"]), self.TESTS)
def test_new_test_script(self):
"""New test script triggers nothing (since it's not in run_tests.py)"""
self.assertEqual(self.determined_tests(["test/test_new_test_script.py"]), [])
if __name__ == "__main__":
run_tests()
| DeterminationTest |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 5183,
"end": 5483
} | class ____(_VectorIndexConfigUpdate):
threshold: Optional[int]
hnsw: Optional[_VectorIndexConfigHNSWUpdate]
flat: Optional[_VectorIndexConfigFlatUpdate]
@staticmethod
def vector_index_type() -> VectorIndexType:
return VectorIndexType.DYNAMIC
| _VectorIndexConfigDynamicUpdate |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 67119,
"end": 67697
} | class ____(
NamedTuple(
"_StepExpectationResultData",
[
("expectation_result", ExpectationResult),
],
)
):
def __new__(cls, expectation_result: ExpectationResult):
return super().__new__(
cls,
expectation_result=check.inst_param(
expectation_result, "expectation_result", ExpectationResult
),
)
@whitelist_for_serdes(
storage_field_names={"metadata": "metadata_entries"},
field_serializers={"metadata": MetadataFieldSerializer},
)
| StepExpectationResultData |
python | PrefectHQ__prefect | tests/experimental/test_sla.py | {
"start": 2908,
"end": 3190
} | class ____:
async def test_create_sla(self):
sla = ServiceLevelAgreement(
name="test-sla",
)
deployment_id = uuid4()
sla.set_deployment_id(deployment_id)
assert sla.owner_resource == f"prefect.deployment.{deployment_id}"
| TestSla |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_opensearch_serverless.py | {
"start": 1700,
"end": 3972
} | class ____:
EXPECTED_WAITER_NAME = "collection_available"
COLLECTION_NAME = "test_collection_name"
COLLECTION_ID = "test_collection_id"
@pytest.mark.parametrize(
("collection_name", "collection_id", "expected_pass"),
[
pytest.param(COLLECTION_NAME, COLLECTION_ID, False, id="both_provided_fails"),
pytest.param(COLLECTION_NAME, None, True, id="only_name_provided_passes"),
pytest.param(None, COLLECTION_ID, True, id="only_id_provided_passes"),
],
)
def test_serialization(self, collection_name, collection_id, expected_pass):
"""Assert that arguments and classpath are correctly serialized."""
call_args = prune_dict({"collection_id": collection_id, "collection_name": collection_name})
if expected_pass:
trigger = OpenSearchServerlessCollectionActiveTrigger(**call_args)
classpath, kwargs = trigger.serialize()
assert classpath == BASE_TRIGGER_CLASSPATH + "OpenSearchServerlessCollectionActiveTrigger"
if call_args.get("collection_name"):
assert kwargs.get("collection_name") == self.COLLECTION_NAME
if call_args.get("collection_id"):
assert kwargs.get("collection_id") == self.COLLECTION_ID
if not expected_pass:
with pytest.raises(
AttributeError, match="Either collection_ids or collection_names must be provided, not both."
):
OpenSearchServerlessCollectionActiveTrigger(**call_args)
@pytest.mark.asyncio
@mock.patch.object(OpenSearchServerlessHook, "get_waiter")
@mock.patch.object(OpenSearchServerlessHook, "get_async_conn")
async def test_run_success(self, mock_async_conn, mock_get_waiter):
mock_async_conn.__aenter__.return_value = mock.MagicMock()
mock_get_waiter().wait = AsyncMock()
trigger = OpenSearchServerlessCollectionActiveTrigger(collection_id=self.COLLECTION_ID)
generator = trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({"status": "success", "collection_id": self.COLLECTION_ID})
assert mock_get_waiter().wait.call_count == 1
| TestOpenSearchServerlessCollectionActiveTrigger |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py | {
"start": 645,
"end": 12931
} | class ____(CustomLLM):
"""
Predibase LLM.
To use, you should have the ``predibase`` python package installed,
and have your Predibase API key.
The `model_name` parameter is the Predibase "serverless" base_model ID
(see https://docs.predibase.com/user-guide/inference/models for the catalog).
An optional `adapter_id` parameter is the Predibase ID or the HuggingFace ID
of a fine-tuned LLM adapter, whose base model is the `model` parameter; the
fine-tuned adapter must be compatible with its base model; otherwise, an
error is raised. If the fine-tuned adapter is hosted at Predibase,
`adapter_version` must be specified.
Examples:
`pip install llama-index-llms-predibase`
```python
import os
os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
from llama_index.llms.predibase import PredibaseLLM
llm = PredibaseLLM(
model_name="mistral-7b",
predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)
adapter_id="my-adapter-id", # optional parameter
adapter_version=3, # optional parameter (applies to Predibase only)
api_token, # optional parameter for accessing services hosting adapters (e.g., HuggingFace)
temperature=0.3,
max_new_tokens=512,
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model_name: str = Field(description="The Predibase base model to use.")
predibase_api_key: str = Field(description="The Predibase API key to use.")
predibase_sdk_version: str = Field(
default=None,
description="The optional version (string) of the Predibase SDK (defaults to the latest if not specified).",
)
adapter_id: str = Field(
default=None,
description="The optional Predibase ID or HuggingFace ID of a fine-tuned adapter to use.",
)
adapter_version: str = Field(
default=None,
description="The optional version number of fine-tuned adapter use (applies to Predibase only).",
)
api_token: str = Field(
default=None,
description="The adapter hosting service API key to use.",
)
max_new_tokens: int = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The number of tokens to generate.",
gt=0,
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use for sampling.",
ge=0.0,
le=1.0,
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The number of context tokens available to the LLM.",
gt=0,
)
_client: Any = PrivateAttr()
def __init__(
self,
model_name: str,
predibase_api_key: Optional[str] = None,
predibase_sdk_version: Optional[str] = None,
adapter_id: Optional[str] = None,
adapter_version: Optional[int] = None,
api_token: Optional[str] = None,
max_new_tokens: int = DEFAULT_NUM_OUTPUTS,
temperature: float = DEFAULT_TEMPERATURE,
context_window: int = DEFAULT_CONTEXT_WINDOW,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
predibase_api_key = (
predibase_api_key
if predibase_api_key
else os.environ.get("PREDIBASE_API_TOKEN")
)
if not predibase_api_key:
raise ValueError(
'Your "PREDIBASE_API_TOKEN" is empty. Please generate a valid "PREDIBASE_API_TOKEN" in your Predibase account.'
)
super().__init__(
model_name=model_name,
predibase_api_key=predibase_api_key,
predibase_sdk_version=predibase_sdk_version,
adapter_id=adapter_id,
adapter_version=adapter_version,
api_token=api_token,
max_new_tokens=max_new_tokens,
temperature=temperature,
context_window=context_window,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._client: Union["PredibaseClient", "Predibase"] = self.initialize_client()
def initialize_client(
self,
) -> Union["PredibaseClient", "Predibase"]:
try:
if self._is_deprecated_sdk_version():
from predibase import PredibaseClient
from predibase.pql import get_session
from predibase.pql.api import Session
session: Session = get_session(
token=self.predibase_api_key,
gateway="https://api.app.predibase.com/v1",
serving_endpoint="serving.app.predibase.com",
)
return PredibaseClient(session=session)
from predibase import Predibase
os.environ["PREDIBASE_GATEWAY"] = "https://api.app.predibase.com"
return Predibase(api_token=self.predibase_api_key)
except ValueError as e:
raise ValueError(
'Your "PREDIBASE_API_TOKEN" is not correct. Please try again.'
) from e
@classmethod
def class_name(cls) -> str:
return "PredibaseLLM"
@property
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_new_tokens,
model_name=self.model_name,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> "CompletionResponse":
options: Dict[str, Union[str, float]] = {
**{
"api_token": self.api_token,
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
},
**(kwargs or {}),
}
response_text: str
if self._is_deprecated_sdk_version():
from predibase.pql.api import ServerResponseError
from predibase.resource.llm.interface import (
HuggingFaceLLM,
LLMDeployment,
)
from predibase.resource.llm.response import GeneratedResponse
from predibase.resource.model import Model
base_llm_deployment: LLMDeployment = self._client.LLM(
uri=f"pb://deployments/{self.model_name}"
)
result: GeneratedResponse
if self.adapter_id:
"""
Attempt to retrieve the fine-tuned adapter from a Predibase repository.
If absent, then load the fine-tuned adapter from a HuggingFace repository.
"""
adapter_model: Union[Model, HuggingFaceLLM]
try:
adapter_model = self._client.get_model(
name=self.adapter_id,
version=self.adapter_version,
model_id=None,
)
except ServerResponseError:
# Predibase does not recognize the adapter ID (query HuggingFace).
adapter_model = self._client.LLM(uri=f"hf://{self.adapter_id}")
result = base_llm_deployment.with_adapter(model=adapter_model).generate(
prompt=prompt,
options=options,
)
else:
result = base_llm_deployment.generate(
prompt=prompt,
options=options,
)
response_text = result.response
else:
import requests
from lorax.client import Client as LoraxClient
from lorax.errors import GenerationError
from lorax.types import Response
lorax_client: LoraxClient = self._client.deployments.client(
deployment_ref=self.model_name
)
response: Response
if self.adapter_id:
"""
Attempt to retrieve the fine-tuned adapter from a Predibase repository.
If absent, then load the fine-tuned adapter from a HuggingFace repository.
"""
if self.adapter_version:
# Since the adapter version is provided, query the Predibase repository.
pb_adapter_id: str = f"{self.adapter_id}/{self.adapter_version}"
options.pop("api_token", None)
try:
response = lorax_client.generate(
prompt=prompt,
adapter_id=pb_adapter_id,
**options,
)
except GenerationError as ge:
raise ValueError(
f'An adapter with the ID "{pb_adapter_id}" cannot be found in the Predibase repository of fine-tuned adapters.'
) from ge
else:
# The adapter version is omitted, hence look for the adapter ID in the HuggingFace repository.
try:
response = lorax_client.generate(
prompt=prompt,
adapter_id=self.adapter_id,
adapter_source="hub",
**options,
)
except GenerationError as ge:
raise ValueError(
f"""Either an adapter with the ID "{self.adapter_id}" cannot be found in a HuggingFace repository, \
or it is incompatible with the base model (please make sure that the adapter configuration is consistent).
"""
) from ge
else:
try:
response = lorax_client.generate(
prompt=prompt,
**options,
)
except requests.JSONDecodeError as jde:
raise ValueError(
f"""An LLM with the deployment ID "{self.model_name}" cannot be found at Predibase \
(please refer to "https://docs.predibase.com/user-guide/inference/models" for the list of supported models).
"""
) from jde
response_text = response.generated_text
return CompletionResponse(text=response_text)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> "CompletionResponseGen":
raise NotImplementedError
def _is_deprecated_sdk_version(self) -> bool:
try:
import semantic_version
from semantic_version.base import Version
from predibase.version import __version__ as current_version
sdk_semver_deprecated: Version = semantic_version.Version(
version_string="2024.4.8"
)
actual_current_version: str = self.predibase_sdk_version or current_version
sdk_semver_current: Version = semantic_version.Version(
version_string=actual_current_version
)
return not (
(sdk_semver_current > sdk_semver_deprecated)
or ("+dev" in actual_current_version)
)
except ImportError as e:
raise ImportError(
"Could not import Predibase Python package. "
"Please install it with `pip install semantic_version predibase`."
) from e
| PredibaseLLM |
python | GoogleCloudPlatform__python-docs-samples | speech/microphone/transcribe_streaming_infinite_v2.py | {
"start": 1611,
"end": 13364
} | class ____:
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(
self: object,
rate: int,
chunk_size: int,
) -> None:
"""Creates a resumable microphone stream.
Args:
self: The class instance.
rate: The audio file's sampling rate.
chunk_size: The audio file's chunk size.
returns: None
"""
self._rate = rate
self.chunk_size = chunk_size
self._num_channels = 1
self._buff = queue.Queue()
self.closed = True
self.start_time = get_current_time()
self.restart_counter = 0
self.audio_input = []
self.last_audio_input = []
self.result_end_time = 0
self.is_final_end_time = 0
self.final_request_end_time = 0
self.bridging_offset = 0
self.last_transcript_was_final = False
self.new_stream = True
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
channels=self._num_channels,
rate=self._rate,
input=True,
frames_per_buffer=self.chunk_size,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
def __enter__(self: object) -> object:
"""Opens the stream.
Args:
self: The class instance.
returns: None
"""
self.closed = False
return self
def __exit__(
self: object,
type: object,
value: object,
traceback: object,
) -> object:
"""Closes the stream and releases resources.
Args:
self: The class instance.
type: The exception type.
value: The exception value.
traceback: The exception traceback.
returns: None
"""
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(
self: object,
in_data: object,
*args: object,
**kwargs: object,
) -> object:
"""Continuously collect data from the audio stream, into the buffer.
Args:
self: The class instance.
in_data: The audio data as a bytes object.
args: Additional arguments.
kwargs: Additional arguments.
returns: None
"""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self: object) -> object:
"""Stream Audio from microphone to API and to local buffer
Args:
self: The class instance.
returns:
The data from the audio stream.
"""
while not self.closed:
data = []
if self.new_stream and self.last_audio_input:
chunk_time = STREAMING_LIMIT / len(self.last_audio_input)
if chunk_time != 0:
if self.bridging_offset < 0:
self.bridging_offset = 0
if self.bridging_offset > self.final_request_end_time:
self.bridging_offset = self.final_request_end_time
chunks_from_ms = round(
(self.final_request_end_time - self.bridging_offset)
/ chunk_time
)
self.bridging_offset = round(
(len(self.last_audio_input) - chunks_from_ms) * chunk_time
)
for i in range(chunks_from_ms, len(self.last_audio_input)):
data.append(self.last_audio_input[i])
self.new_stream = False
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
self.audio_input.append(chunk)
if chunk is None:
return
data.append(chunk)
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
self.audio_input.append(chunk)
except queue.Empty:
break
# Enforce max streaming chunk size supported by the API
combined_size = sum(len(chunk) for chunk in data)
if combined_size <= MAX_STREAMING_CHUNK:
yield b"".join(data)
else:
run_chunks = []
run_size = 0
for chunk in data:
if len(chunk) + run_size > MAX_STREAMING_CHUNK:
yield b"".join(run_chunks)
run_chunks = [chunk]
run_size = len(chunk)
else:
run_chunks.append(chunk)
run_size += len(chunk)
if run_chunks:
yield b"".join(run_chunks)
def listen_print_loop(responses: object, stream: object) -> None:
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
Arg:
responses: The responses returned from the API.
stream: The audio stream to be processed.
"""
for response in responses:
if get_current_time() - stream.start_time > STREAMING_LIMIT:
stream.start_time = get_current_time()
break
if not response.results:
continue
result = response.results[0]
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
result_seconds = 0
result_micros = 0
# Speech-to-text V2 result uses attribute result_end_offset instead of result_end_time
# https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/StreamingRecognitionResult
if result.result_end_offset.seconds:
result_seconds = result.result_end_offset.seconds
if result.result_end_offset.microseconds:
result_micros = result.result_end_offset.microseconds
stream.result_end_time = int((result_seconds * 1000) + (result_micros / 1000))
corrected_time = (
stream.result_end_time
- stream.bridging_offset
+ (STREAMING_LIMIT * stream.restart_counter)
)
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
if result.is_final:
sys.stdout.write(GREEN)
sys.stdout.write("\033[K")
sys.stdout.write(str(corrected_time) + ": " + transcript + "\n")
stream.is_final_end_time = stream.result_end_time
stream.last_transcript_was_final = True
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if re.search(r"\b(exit|quit)\b", transcript, re.I):
sys.stdout.write(YELLOW)
sys.stdout.write("Exiting...\n")
stream.closed = True
break
else:
sys.stdout.write(RED)
sys.stdout.write("\033[K")
sys.stdout.write(str(corrected_time) + ": " + transcript + "\r")
stream.last_transcript_was_final = False
def main(project_id: str) -> None:
"""start bidirectional streaming from microphone input to speech API"""
client = SpeechClient()
recognition_config = cloud_speech_types.RecognitionConfig(
explicit_decoding_config=cloud_speech_types.ExplicitDecodingConfig(
sample_rate_hertz=SAMPLE_RATE,
encoding=cloud_speech_types.ExplicitDecodingConfig.AudioEncoding.LINEAR16,
audio_channel_count=1
),
language_codes=["en-US"],
model="long",
)
streaming_config = cloud_speech_types.StreamingRecognitionConfig(
config=recognition_config,
streaming_features=cloud_speech_types.StreamingRecognitionFeatures(
interim_results=True
)
)
config_request = cloud_speech_types.StreamingRecognizeRequest(
recognizer=f"projects/{project_id}/locations/global/recognizers/_",
streaming_config=streaming_config,
)
def requests(config: cloud_speech_types.RecognitionConfig, audio: list) -> list:
"""Helper function to generate the requests list for the streaming API.
Args:
config: The speech recognition configuration.
audio: The audio data.
Returns:
The list of requests for the streaming API.
"""
yield config
for chunk in audio:
yield cloud_speech_types.StreamingRecognizeRequest(audio=chunk)
mic_manager = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE)
print(mic_manager.chunk_size)
sys.stdout.write(YELLOW)
sys.stdout.write('\nListening, say "Quit" or "Exit" to stop.\n\n')
sys.stdout.write("End (ms) Transcript Results/Status\n")
sys.stdout.write("=====================================================\n")
with mic_manager as stream:
while not stream.closed:
sys.stdout.write(YELLOW)
sys.stdout.write(
"\n" + str(STREAMING_LIMIT * stream.restart_counter) + ": NEW REQUEST\n"
)
stream.audio_input = []
audio_generator = stream.generator()
# Transcribes the audio into text
responses_iterator = client.streaming_recognize(
requests=requests(config_request, audio_generator))
listen_print_loop(responses_iterator, stream)
if stream.result_end_time > 0:
stream.final_request_end_time = stream.is_final_end_time
stream.result_end_time = 0
stream.last_audio_input = []
stream.last_audio_input = stream.audio_input
stream.audio_input = []
stream.restart_counter = stream.restart_counter + 1
if not stream.last_transcript_was_final:
sys.stdout.write("\n")
stream.new_stream = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("project_id", help="GCP Project ID")
args = parser.parse_args()
main(args.project_id)
# [END speech_transcribe_infinite_streaming_v2]
| ResumableMicrophoneStream |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 181655,
"end": 181782
} | class ____(ChoiceCaller):
def get_make_kernel_render(self) -> Any:
raise NotImplementedError
| TritonTemplateCallerBase |
python | scikit-image__scikit-image | benchmarks/benchmark_transform.py | {
"start": 51,
"end": 411
} | class ____:
"""Benchmark for transform routines in scikit-image."""
def setup(self):
self.image = np.zeros((2000, 2000))
idx = np.arange(500, 1500)
self.image[idx[::-1], idx] = 255
self.image[idx, idx] = 255
def time_hough_line(self):
result1, result2, result3 = transform.hough_line(self.image)
| TransformSuite |
python | wireservice__csvkit | tests/test_utilities/test_csvsql.py | {
"start": 352,
"end": 10243
} | class ____(CSVKitTestCase, EmptyFileTests):
Utility = CSVSQL
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']):
launch_new_instance()
def test_options(self):
for args, message in (
(
['--db', 'sqlite:///:memory:', '--dialect', 'sqlite'],
'The --dialect option is only valid when neither --db nor --query are specified.',
),
(
['--insert'],
'The --insert option is only valid when either --db or --query is specified.',
),
(
['--db', 'sqlite:///:memory:', '--insert', '--no-create', '--overwrite'],
'The --overwrite option is only valid if --no-create is not specified.',
),
(
['--db', 'sqlite:///:memory:', '--insert', '--no-create', '--create-if-not-exists'],
'The --no-create and --create-if-not-exists options are mutually exclusive.',
),
):
with self.subTest(args=args):
self.assertError(launch_new_instance, args, message)
def test_insert_options(self):
for args in (
['--no-create'],
['--create-if-not-exists'],
['--overwrite'],
['--before-insert'],
['--after-insert'],
['--chunk-size', '1'],
):
with self.subTest(args=args):
self.assertError(
launch_new_instance,
args,
f'The {args[0]} option is only valid if --insert is also specified.'
)
def setUp(self):
self.db_file = 'foo.db'
def tearDown(self):
if os.path.exists(self.db_file):
os.remove(self.db_file)
@unittest.skipIf(os.name != 'nt', 'Windows only')
def test_glob(self):
sql = self.get_output(['examples/dummy?.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE dummy2 (
a BOOLEAN NOT NULL,
b DECIMAL NOT NULL,
c DECIMAL NOT NULL
);
CREATE TABLE dummy3 (
a BOOLEAN NOT NULL,
b DECIMAL NOT NULL,
c DECIMAL NOT NULL
);
''')) # noqa: W291
def test_create_table(self):
sql = self.get_output(['--tables', 'foo', 'examples/testfixed_converted.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
text VARCHAR NOT NULL,
date DATE,
integer DECIMAL,
boolean BOOLEAN,
float DECIMAL,
time DATETIME,
datetime TIMESTAMP,
empty_column BOOLEAN
);
''')) # noqa: W291
def test_no_blanks(self):
sql = self.get_output(['--tables', 'foo', 'examples/blanks.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
a BOOLEAN,
b BOOLEAN,
c BOOLEAN,
d BOOLEAN,
e BOOLEAN,
f BOOLEAN
);
''')) # noqa: W291
def test_blanks(self):
sql = self.get_output(['--tables', 'foo', '--blanks', 'examples/blanks.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
a VARCHAR NOT NULL,
b VARCHAR NOT NULL,
c VARCHAR NOT NULL,
d VARCHAR NOT NULL,
e VARCHAR NOT NULL,
f VARCHAR NOT NULL
);
''')) # noqa: W291
def test_no_inference(self):
sql = self.get_output(['--tables', 'foo', '--no-inference', 'examples/testfixed_converted.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
text VARCHAR NOT NULL,
date VARCHAR,
integer VARCHAR,
boolean VARCHAR,
float VARCHAR,
time VARCHAR,
datetime VARCHAR,
empty_column VARCHAR
);
''')) # noqa: W291
def test_no_header_row(self):
sql = self.get_output(['--tables', 'foo', '--no-header-row', 'examples/no_header_row.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
a BOOLEAN NOT NULL,
b DECIMAL NOT NULL,
c DECIMAL NOT NULL
);
''')) # noqa: W291
def test_linenumbers(self):
sql = self.get_output(['--tables', 'foo', '--linenumbers', 'examples/dummy.csv'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
a BOOLEAN NOT NULL,
b DECIMAL NOT NULL,
c DECIMAL NOT NULL
);
''')) # noqa: W291
def test_stdin(self):
input_file = io.BytesIO(b'a,b,c\n4,2,3\n')
with stdin_as_string(input_file):
sql = self.get_output(['--tables', 'foo'])
self.assertEqual(sql.replace('\t', ' '), dedent('''\
CREATE TABLE foo (
a DECIMAL NOT NULL,
b DECIMAL NOT NULL,
c DECIMAL NOT NULL
);
''')) # noqa: W291
input_file.close()
def test_stdin_and_filename(self):
input_file = io.BytesIO(b'a,b,c\n1,2,3\n')
with stdin_as_string(input_file):
sql = self.get_output(['-', 'examples/dummy.csv'])
self.assertTrue('CREATE TABLE stdin' in sql)
self.assertTrue('CREATE TABLE dummy' in sql)
input_file.close()
def test_query(self):
input_file = io.BytesIO(b'a,b,c\n1,2,3\n')
with stdin_as_string(input_file):
sql = self.get_output(['--query', 'SELECT m.usda_id, avg(i.sepal_length) AS mean_sepal_length FROM iris '
'AS i JOIN irismeta AS m ON (i.species = m.species) GROUP BY m.species',
'examples/iris.csv', 'examples/irismeta.csv'])
self.assertTrue('usda_id,mean_sepal_length' in sql)
self.assertTrue('IRSE,5.00' in sql)
self.assertTrue('IRVE2,5.936' in sql)
self.assertTrue('IRVI,6.58' in sql)
input_file.close()
def test_query_empty(self):
input_file = io.BytesIO()
with stdin_as_string(input_file):
output = self.get_output(['--query', 'SELECT 1'])
self.assertEqual(output, '1\n1\n')
input_file.close()
def test_query_text(self):
sql = self.get_output(['--query', 'SELECT text FROM testfixed_converted WHERE text LIKE "Chicago%"',
'examples/testfixed_converted.csv'])
self.assertEqual(sql,
"text\n"
"Chicago Reader\n"
"Chicago Sun-Times\n"
"Chicago Tribune\n")
def test_query_file(self):
sql = self.get_output(['--query', 'examples/test_query.sql', 'examples/testfixed_converted.csv'])
self.assertEqual(sql,
"question,text\n"
"36,©\n")
def test_query_update(self):
sql = self.get_output(['--query', 'UPDATE dummy SET a=10 WHERE a=1', '--no-inference', 'examples/dummy.csv'])
self.assertEqual(sql, '')
def test_before_after_insert(self):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert', 'examples/dummy.csv', '--before-insert',
'SELECT 1; CREATE TABLE foobar (date DATE)', '--after-insert',
'INSERT INTO dummy VALUES (0, 5, 6)'])
output_file = io.StringIO()
utility = SQL2CSV(['--db', 'sqlite:///' + self.db_file, '--query', 'SELECT * FROM foobar'], output_file)
utility.run()
output = output_file.getvalue()
output_file.close()
self.assertEqual(output, 'date\n')
output_file = io.StringIO()
utility = SQL2CSV(['--db', 'sqlite:///' + self.db_file, '--query', 'SELECT * FROM dummy'], output_file)
utility.run()
output = output_file.getvalue()
output_file.close()
self.assertEqual(output, 'a,b,c\n1,2.0,3.0\n0,5.0,6.0\n')
def test_no_prefix_unique_constraint(self):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert',
'examples/dummy.csv', '--unique-constraint', 'a'])
with self.assertRaises(IntegrityError):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert', 'examples/dummy.csv', '--no-create'])
def test_prefix_unique_constraint(self):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert',
'examples/dummy.csv', '--unique-constraint', 'a'])
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert',
'examples/dummy.csv', '--no-create', '--prefix', 'OR IGNORE'])
def test_no_create_if_not_exists(self):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert', '--tables', 'foo', 'examples/foo1.csv'])
with self.assertRaises(OperationalError):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert', '--tables', 'foo', 'examples/foo2.csv'])
def test_create_if_not_exists(self):
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert', '--tables', 'foo', 'examples/foo1.csv'])
self.get_output(['--db', 'sqlite:///' + self.db_file, '--insert', '--tables',
'foo', 'examples/foo2.csv', '--create-if-not-exists'])
| TestCSVSQL |
python | google__pytype | pytype/tests/test_base_test.py | {
"start": 6495,
"end": 6980
} | class ____(test_base.BaseTest):
def test_dep_tree(self):
foo_pyi = """
class A: pass
"""
bar_py = """
import foo
x = foo.A()
"""
deps = [("foo.pyi", foo_pyi), ("bar.py", bar_py)]
with self.DepTree(deps) as d:
self.Check("""
import foo
import bar
assert_type(bar.x, foo.A)
""")
self.assertCountEqual(os.listdir(d.path), ["foo.pyi", "bar.pyi"])
if __name__ == "__main__":
test_base.main()
| DepTreeTest |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_database_cleanup.py | {
"start": 2453,
"end": 18225
} | class ____:
"""Tests database cleanup."""
def test_should_create_cronjob_for_enabled_cleanup(self):
docs = render_chart(
values={
"databaseCleanup": {"enabled": True},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert (
jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].name", docs[0])
== "database-cleanup"
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].image", docs[0]).startswith(
"apache/airflow"
)
assert {"name": "config", "configMap": {"name": "release-name-config"}} in jmespath.search(
"spec.jobTemplate.spec.template.spec.volumes", docs[0]
)
assert {
"name": "config",
"mountPath": "/opt/airflow/airflow.cfg",
"subPath": "airflow.cfg",
"readOnly": True,
} in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
assert "successfulJobsHistoryLimit" in docs[0]["spec"]
assert "failedJobsHistoryLimit" in docs[0]["spec"]
def test_should_pass_validation_with_v1beta1_api(self):
render_chart(
values={"databaseCleanup": {"enabled": True}},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
kubernetes_version="1.16.0",
) # checks that no validation exception is raised
def test_should_change_image_when_set_airflow_image(self):
docs = render_chart(
values={
"databaseCleanup": {"enabled": True},
"images": {"airflow": {"repository": "airflow", "tag": "test"}},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert (
jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].image", docs[0])
== "airflow:test"
)
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("kind", docs[0]) == "CronJob"
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
== "foo"
)
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.nodeSelector.diskType",
docs[0],
)
== "ssd"
)
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.tolerations[0].key",
docs[0],
)
== "dynamic-pods"
)
def test_scheduler_name(self):
docs = render_chart(
values={"databaseCleanup": {"enabled": True}, "schedulerName": "airflow-scheduler"},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.schedulerName",
docs[0],
)
== "airflow-scheduler"
)
def test_retention_days_changed(self):
docs = render_chart(
values={"databaseCleanup": {"enabled": True, "retentionDays": 10}},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0]) == [
"-c",
'CLEAN_TS=$(date -d "-10 days" +"%Y-%m-%dT%H:%M:%S"); echo "Cleaning up metadata DB entries older than ${CLEAN_TS}"; exec airflow db clean --clean-before-timestamp "${CLEAN_TS}" --yes --verbose',
]
def test_default_command_and_args(self):
docs = render_chart(
values={"databaseCleanup": {"enabled": True}},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]) == [
"bash"
]
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0]) == [
"-c",
'CLEAN_TS=$(date -d "-90 days" +"%Y-%m-%dT%H:%M:%S"); echo "Cleaning up metadata DB entries older than ${CLEAN_TS}"; exec airflow db clean --clean-before-timestamp "${CLEAN_TS}" --yes --verbose',
]
@pytest.mark.parametrize(
("retention", "skip_archive", "verbose", "batch_size", "tables", "command_args"),
[
(90, False, False, None, None, ""),
(91, True, False, None, None, " --skip-archive"),
(92, False, True, None, None, " --verbose"),
(93, False, False, 200, None, " --batch-size 200"),
(94, False, False, None, ["xcom"], " --tables xcom"),
(
95,
True,
True,
500,
["task_instance", "log"],
" --skip-archive --verbose --batch-size 500 --tables task_instance,log",
),
],
)
def test_cleanup_command_options(
self, retention, skip_archive, verbose, batch_size, tables, command_args
):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"retentionDays": retention,
"skipArchive": skip_archive,
"verbose": verbose,
"batchSize": batch_size,
"tables": tables,
}
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]) == [
"bash"
]
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0]) == [
"-c",
f'CLEAN_TS=$(date -d "-{retention} days" +"%Y-%m-%dT%H:%M:%S"); echo "Cleaning up metadata DB entries older than ${{CLEAN_TS}}"; exec airflow db clean --clean-before-timestamp "${{CLEAN_TS}}" --yes{command_args}',
]
def test_should_add_extraEnvs(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"env": [{"name": "TEST_ENV_1", "value": "test_env_1"}],
},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].env", docs[0]
)
@pytest.mark.parametrize("command", [None, ["custom", "command"]])
@pytest.mark.parametrize("args", [None, ["custom", "args"]])
def test_command_and_args_overrides(self, command, args):
docs = render_chart(
values={"databaseCleanup": {"enabled": True, "command": command, "args": args}},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
if not command and not args:
assert not docs, (
"The CronJob should not be created if command and args are null even if enabled is true"
)
else:
assert command == jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]
)
assert args == jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0])
def test_command_and_args_overrides_are_templated(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"command": ["{{ .Release.Name }}"],
"args": ["{{ .Release.Service }}"],
}
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]) == [
"release-name"
]
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0]) == ["Helm"]
def test_should_set_labels_to_jobs_from_cronjob(self):
docs = render_chart(
values={
"databaseCleanup": {"enabled": True},
"labels": {"project": "airflow"},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.metadata.labels", docs[0]) == {
"tier": "airflow",
"component": "database-cleanup",
"release": "release-name",
"project": "airflow",
}
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert "test_label" in jmespath.search("spec.jobTemplate.spec.template.metadata.labels", docs[0])
assert (
jmespath.search("spec.jobTemplate.spec.template.metadata.labels", docs[0])["test_label"]
== "test_label_value"
)
def test_should_add_component_specific_annotations(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"jobAnnotations": {"test_cronjob_annotation": "test_cronjob_annotation_value"},
"podAnnotations": {"test_pod_annotation": "test_pod_annotation_value"},
},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert "test_cronjob_annotation" in jmespath.search("metadata.annotations", docs[0])
assert (
jmespath.search("metadata.annotations", docs[0])["test_cronjob_annotation"]
== "test_cronjob_annotation_value"
)
assert "test_pod_annotation" in jmespath.search(
"spec.jobTemplate.spec.template.metadata.annotations", docs[0]
)
assert (
jmespath.search("spec.jobTemplate.spec.template.metadata.annotations", docs[0])[
"test_pod_annotation"
]
== "test_pod_annotation_value"
)
def test_cleanup_resources_are_configurable(self):
resources = {
"requests": {
"cpu": "128m",
"memory": "256Mi",
},
"limits": {
"cpu": "256m",
"memory": "512Mi",
},
}
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"resources": resources,
},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert (
jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].resources", docs[0])
== resources
)
def test_should_set_job_history_limits(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"failedJobsHistoryLimit": 2,
"successfulJobsHistoryLimit": 4,
},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.failedJobsHistoryLimit", docs[0]) == 2
assert jmespath.search("spec.successfulJobsHistoryLimit", docs[0]) == 4
def test_should_set_zero_job_history_limits(self):
docs = render_chart(
values={
"databaseCleanup": {
"enabled": True,
"failedJobsHistoryLimit": 0,
"successfulJobsHistoryLimit": 0,
},
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.failedJobsHistoryLimit", docs[0]) == 0
assert jmespath.search("spec.successfulJobsHistoryLimit", docs[0]) == 0
def test_no_airflow_local_settings(self):
docs = render_chart(
values={
"databaseCleanup": {"enabled": True},
"airflowLocalSettings": None,
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
volume_mounts = jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0]
)
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={
"databaseCleanup": {"enabled": True},
"airflowLocalSettings": "# Well hello!",
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
def test_global_volumes_and_volume_mounts(self):
docs = render_chart(
values={
"databaseCleanup": {"enabled": True},
"volumes": [{"name": "test-volume", "emptyDir": {}}],
"volumeMounts": [{"name": "test-volume", "mountPath": "/test"}],
},
show_only=["templates/database-cleanup/database-cleanup-cronjob.yaml"],
)
assert {
"name": "test-volume",
"mountPath": "/test",
} in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
assert {
"name": "test-volume",
"emptyDir": {},
} in jmespath.search("spec.jobTemplate.spec.template.spec.volumes", docs[0])
| TestDatabaseCleanup |
python | pypa__warehouse | warehouse/manage/views/__init__.py | {
"start": 52861,
"end": 89318
} | class ____:
def __init__(self, release, request):
self.release = release
self.request = request
@view_config(request_method="GET")
def manage_project_release(self):
return {
"project": self.release.project,
"release": self.release,
"files": self.release.files.all(),
}
@view_config(
request_method="POST",
request_param=["confirm_yank_version"],
require_reauth=True,
)
def yank_project_release(self):
version = self.request.POST.get("confirm_yank_version")
yanked_reason = self.request.POST.get("yanked_reason", "")
if not version:
self.request.session.flash(
self.request._("Confirm the request"), queue="error"
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
if version != self.release.version:
self.request.session.flash(
self.request._(
"Could not yank release - "
+ f"{version!r} is not the same as {self.release.version!r}"
),
queue="error",
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
submitter_role = get_user_role_in_project(
self.release.project, self.request.user, self.request
)
self.request.db.add(
JournalEntry(
name=self.release.project.name,
action="yank release",
version=self.release.version,
submitted_by=self.request.user,
)
)
self.release.project.record_event(
tag=EventTag.Project.ReleaseYank,
request=self.request,
additional={
"submitted_by": self.request.user.username,
"canonical_version": self.release.canonical_version,
"yanked_reason": yanked_reason,
},
)
self.release.yanked = True
self.release.yanked_reason = yanked_reason
self.request.session.flash(
self.request._(f"Yanked release {self.release.version!r}"), queue="success"
)
for contributor in self.release.project.users:
contributor_role = get_user_role_in_project(
self.release.project, contributor, self.request
)
send_yanked_project_release_email(
self.request,
contributor,
release=self.release,
submitter_name=self.request.user.username,
submitter_role=submitter_role,
recipient_role=contributor_role,
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.releases", project_name=self.release.project.name
)
)
@view_config(
request_method="POST",
request_param=["confirm_unyank_version"],
require_reauth=True,
)
def unyank_project_release(self):
version = self.request.POST.get("confirm_unyank_version")
if not version:
self.request.session.flash(
self.request._("Confirm the request"), queue="error"
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
if version != self.release.version:
self.request.session.flash(
self.request._(
"Could not un-yank release - "
+ f"{version!r} is not the same as {self.release.version!r}"
),
queue="error",
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
submitter_role = get_user_role_in_project(
self.release.project, self.request.user, self.request
)
self.request.db.add(
JournalEntry(
name=self.release.project.name,
action="unyank release",
version=self.release.version,
submitted_by=self.request.user,
)
)
self.release.project.record_event(
tag=EventTag.Project.ReleaseUnyank,
request=self.request,
additional={
"submitted_by": self.request.user.username,
"canonical_version": self.release.canonical_version,
},
)
self.release.yanked = False
self.release.yanked_reason = ""
self.request.session.flash(
self.request._(f"Un-yanked release {self.release.version!r}"),
queue="success",
)
for contributor in self.release.project.users:
contributor_role = get_user_role_in_project(
self.release.project, contributor, self.request
)
send_unyanked_project_release_email(
self.request,
contributor,
release=self.release,
submitter_name=self.request.user.username,
submitter_role=submitter_role,
recipient_role=contributor_role,
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.releases", project_name=self.release.project.name
)
)
@view_config(
request_method="POST",
request_param=["confirm_delete_version"],
require_reauth=True,
)
def delete_project_release(self):
if self.request.flags.enabled(AdminFlagValue.DISALLOW_DELETION):
self.request.session.flash(
self.request._(
"Project deletion temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
version = self.request.POST.get("confirm_delete_version")
if not version:
self.request.session.flash(
self.request._("Confirm the request"), queue="error"
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
if version != self.release.version:
self.request.session.flash(
self.request._(
"Could not delete release - "
+ f"{version!r} is not the same as {self.release.version!r}"
),
queue="error",
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
submitter_role = get_user_role_in_project(
self.release.project, self.request.user, self.request
)
self.request.db.add(
JournalEntry(
name=self.release.project.name,
action="remove release",
version=self.release.version,
submitted_by=self.request.user,
)
)
self.release.project.record_event(
tag=EventTag.Project.ReleaseRemove,
request=self.request,
additional={
"submitted_by": self.request.user.username,
"canonical_version": self.release.canonical_version,
},
)
self.request.db.delete(self.release)
self.request.session.flash(
self.request._(f"Deleted release {self.release.version!r}"), queue="success"
)
for contributor in self.release.project.users:
contributor_role = get_user_role_in_project(
self.release.project, contributor, self.request
)
send_removed_project_release_email(
self.request,
contributor,
release=self.release,
submitter_name=self.request.user.username,
submitter_role=submitter_role,
recipient_role=contributor_role,
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.releases", project_name=self.release.project.name
)
)
@view_config(
request_method="POST",
request_param=["confirm_project_name", "file_id"],
require_reauth=True,
)
def delete_project_release_file(self):
def _error(message):
self.request.session.flash(message, queue="error")
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
if self.request.flags.enabled(AdminFlagValue.DISALLOW_DELETION):
message = self.request._(
"Project deletion temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
)
return _error(message)
project_name = self.request.POST.get("confirm_project_name")
if not project_name:
return _error(self.request._("Confirm the request"))
try:
release_file = (
self.request.db.query(File)
.filter(
File.release == self.release,
File.id == self.request.POST.get("file_id"),
)
.one()
)
except NoResultFound:
return _error(self.request._("Could not find file"))
if project_name != self.release.project.name:
return _error(
self.request._(
"Could not delete file - " + f"{project_name!r} is not the same as "
f"{self.release.project.name!r}"
)
)
self.request.db.add(
JournalEntry(
name=self.release.project.name,
action=f"remove file {release_file.filename}",
version=self.release.version,
submitted_by=self.request.user,
)
)
release_file.record_event(
tag=EventTag.File.FileRemove,
request=self.request,
additional={
"submitted_by": self.request.user.username,
"canonical_version": self.release.canonical_version,
"filename": release_file.filename,
"project_id": str(self.release.project.id),
},
)
submitter_role = get_user_role_in_project(
self.release.project, self.request.user, self.request
)
for contributor in self.release.project.users:
contributor_role = get_user_role_in_project(
self.release.project, contributor, self.request
)
send_removed_project_release_file_email(
self.request,
contributor,
file=release_file.filename,
release=self.release,
submitter_name=self.request.user.username,
submitter_role=submitter_role,
recipient_role=contributor_role,
)
self.request.db.delete(release_file)
self.request.session.flash(
f"Deleted file {release_file.filename!r}", queue="success"
)
return HTTPSeeOther(
self.request.route_path(
"manage.project.release",
project_name=self.release.project.name,
version=self.release.version,
)
)
@view_config(
route_name="manage.project.roles",
context=Project,
renderer="warehouse:templates/manage/project/roles.html",
uses_session=True,
require_methods=False,
permission=Permissions.ProjectsWrite,
has_translations=True,
require_reauth=True,
)
def manage_project_roles(project, request, _form_class=CreateRoleForm):
organization_service = request.find_service(IOrganizationService, context=None)
user_service = request.find_service(IUserService, context=None)
# Roles, invitations, and invite collaborator form for all projects.
roles = set(request.db.query(Role).join(User).filter(Role.project == project).all())
invitations = set(
request.db.query(RoleInvitation)
.join(User)
.filter(RoleInvitation.project == project)
.all()
)
form = _form_class(request.POST, user_service=user_service)
# Team project roles and add internal collaborator form for organization projects.
enable_internal_collaborator = bool(
request.organization_access and project.organization
)
if enable_internal_collaborator:
team_project_roles = set(
request.db.query(TeamProjectRole)
.join(Team)
.filter(TeamProjectRole.project == project)
.all()
)
internal_users = set(
organization_owners(request, project.organization)
+ organization_managers(request, project.organization)
+ organization_members(request, project.organization)
)
internal_role_form = CreateInternalRoleForm(
request.POST,
team_choices=sorted(team.name for team in project.organization.teams),
user_choices=sorted(
user.username for user in internal_users if user not in project.users
),
user_service=user_service,
)
else:
team_project_roles = set()
internal_role_form = None
internal_users = set()
default_response = {
"project": project,
"roles": roles,
"invitations": invitations,
"form": form,
"enable_internal_collaborator": enable_internal_collaborator,
"team_project_roles": team_project_roles,
"internal_role_form": internal_role_form,
}
# Handle GET.
if request.method != "POST":
return default_response
# Determine which form was submitted with POST.
if enable_internal_collaborator and "is_team" in request.POST:
form = internal_role_form
# Validate form.
if not form.validate():
return default_response
# Try adding team as collaborator.
if enable_internal_collaborator and "is_team" in request.POST and form.is_team.data:
team_name = form.team_name.data
role_name = form.team_project_role_name.data
team_id = organization_service.find_teamid(project.organization.id, team_name)
team = organization_service.get_team(team_id)
# Do nothing if role already exists.
existing_role = (
request.db.query(TeamProjectRole)
.filter(TeamProjectRole.team == team, TeamProjectRole.project == project)
.first()
)
if existing_role:
request.session.flash(
request._(
"Team '${team_name}' already has ${role_name} role for project",
mapping={
"team_name": team_name,
"role_name": existing_role.role_name.value,
},
),
queue="error",
)
return default_response
# Add internal team.
organization_service.add_team_project_role(team.id, project.id, role_name)
# Add journal entry.
request.db.add(
JournalEntry(
name=project.name,
action=f"add {role_name.value} {team_name}",
submitted_by=request.user,
)
)
# Record events.
project.record_event(
tag=EventTag.Project.TeamProjectRoleAdd,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"role_name": role_name.value,
"target_team": team.name,
},
)
team.organization.record_event(
tag=EventTag.Organization.TeamProjectRoleAdd,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"project_name": project.name,
"role_name": role_name.value,
"target_team": team.name,
},
)
team.record_event(
tag=EventTag.Team.TeamProjectRoleAdd,
request=request,
additional={
"submitted_by_user_id": str(request.user.id),
"project_name": project.name,
"role_name": role_name.value,
},
)
# Send notification emails.
member_users = set(team.members)
owner_users = set(project.owners + project.organization.owners)
owner_users -= member_users
send_team_collaborator_added_email(
request,
owner_users,
team=team,
submitter=request.user,
project_name=project.name,
role=role_name.value,
)
send_added_as_team_collaborator_email(
request,
member_users,
team=team,
submitter=request.user,
project_name=project.name,
role=role_name.value,
)
# Display notification message.
request.session.flash(
request._(
(
"${team_name} now has ${role} permissions "
"for the '${project_name}' project."
),
mapping={
"team_name": team.name,
"project_name": project.name,
"role": role_name.value,
},
),
queue="success",
)
# Refresh project collaborators.
return HTTPSeeOther(request.path)
# Try adding user as collaborator.
username = form.username.data
role_name = form.role_name.data
userid = user_service.find_userid(username)
user = user_service.get_user(userid)
# Do nothing if role already exists.
existing_role = (
request.db.query(Role)
.filter(Role.user == user, Role.project == project)
.first()
)
if existing_role:
request.session.flash(
request._(
"User '${username}' already has ${role_name} role for project",
mapping={
"username": username,
"role_name": existing_role.role_name,
},
),
queue="error",
)
# Refresh project collaborators.
return HTTPSeeOther(request.path)
if enable_internal_collaborator and user in internal_users:
# Add internal member.
request.db.add(Role(user=user, project=project, role_name=role_name))
# Add journal entry.
request.db.add(
JournalEntry(
name=project.name,
action=f"add {role_name} {user.username}",
submitted_by=request.user,
)
)
# Record events.
project.record_event(
tag=EventTag.Project.RoleAdd,
request=request,
additional={
"submitted_by": request.user.username,
"role_name": role_name,
"target_user": user.username,
},
)
user.record_event(
tag=EventTag.Account.RoleAdd,
request=request,
additional={
"submitted_by": request.user.username,
"project_name": project.name,
"role_name": role_name,
},
)
# Send notification emails.
owner_users = set(project.owners + project.organization.owners)
owner_users.discard(user)
send_collaborator_added_email(
request,
owner_users,
user=user,
submitter=request.user,
project_name=project.name,
role=role_name,
)
send_added_as_collaborator_email(
request,
user,
submitter=request.user,
project_name=project.name,
role=role_name,
)
# Display notification message.
request.session.flash(
request._(
"${username} is now ${role} of the '${project_name}' project.",
mapping={
"username": username,
"project_name": project.name,
"role": role_name,
},
),
queue="success",
)
# Refresh project collaborators.
return HTTPSeeOther(request.path)
else:
# Invite external user.
token_service = request.find_service(ITokenService, name="email")
user_invite = (
request.db.query(RoleInvitation)
.filter(RoleInvitation.user == user)
.filter(RoleInvitation.project == project)
.one_or_none()
)
# Cover edge case where invite is invalid but task
# has not updated invite status
try:
invite_token = token_service.loads(user_invite.token)
except (TokenExpired, AttributeError):
invite_token = None
if user.primary_email is None or not user.primary_email.verified:
request.session.flash(
request._(
"User '${username}' does not have a verified primary email "
"address and cannot be added as a ${role_name} for project",
mapping={"username": username, "role_name": role_name},
),
queue="error",
)
elif (
user_invite
and user_invite.invite_status == RoleInvitationStatus.Pending
and invite_token
):
request.session.flash(
request._(
"User '${username}' already has an active invite. "
"Please try again later.",
mapping={"username": username},
),
queue="error",
)
else:
invite_token = token_service.dumps(
{
"action": "email-project-role-verify",
"desired_role": role_name,
"user_id": user.id,
"project_id": project.id,
"submitter_id": request.user.id,
}
)
if user_invite:
user_invite.invite_status = RoleInvitationStatus.Pending
user_invite.token = invite_token
else:
request.db.add(
RoleInvitation(
user=user,
project=project,
invite_status=RoleInvitationStatus.Pending,
token=invite_token,
)
)
request.db.add(
JournalEntry(
name=project.name,
action=f"invite {role_name} {username}",
submitted_by=request.user,
)
)
send_project_role_verification_email(
request,
user,
desired_role=role_name,
initiator_username=request.user.username,
project_name=project.name,
email_token=invite_token,
token_age=token_service.max_age,
)
project.record_event(
tag=EventTag.Project.RoleInvite,
request=request,
additional={
"submitted_by": request.user.username,
"role_name": role_name,
"target_user": username,
},
)
user.record_event(
tag=EventTag.Account.RoleInvite,
request=request,
additional={
"submitted_by": request.user.username,
"project_name": project.name,
"role_name": role_name,
},
)
request.session.flash(
request._(
"Invitation sent to '${username}'",
mapping={"username": username},
),
queue="success",
)
# Refresh project collaborators.
return HTTPSeeOther(request.path)
@view_config(
route_name="manage.project.revoke_invite",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
has_translations=True,
)
def revoke_project_role_invitation(project, request, _form_class=ChangeRoleForm):
user_service = request.find_service(IUserService, context=None)
token_service = request.find_service(ITokenService, name="email")
user = user_service.get_user(request.POST["user_id"])
try:
user_invite = (
request.db.query(RoleInvitation)
.filter(RoleInvitation.project == project)
.filter(RoleInvitation.user == user)
.one()
)
except NoResultFound:
request.session.flash(
request._("Could not find role invitation."), queue="error"
)
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
request.db.delete(user_invite)
try:
token_data = token_service.loads(user_invite.token)
except TokenExpired:
request.session.flash(request._("Invitation already expired."), queue="success")
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
role_name = token_data.get("desired_role")
request.db.add(
JournalEntry(
name=project.name,
action=f"revoke_invite {role_name} {user.username}",
submitted_by=request.user,
)
)
project.record_event(
tag=EventTag.Project.RoleRevokeInvite,
request=request,
additional={
"submitted_by": request.user.username,
"role_name": role_name,
"target_user": user.username,
},
)
user.record_event(
tag=EventTag.Account.RoleRevokeInvite,
request=request,
additional={
"submitted_by": request.user.username,
"project_name": project.name,
"role_name": role_name,
},
)
request.session.flash(
request._(
"Invitation revoked from '${username}'.",
mapping={"username": user.username},
),
queue="success",
)
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
@view_config(
route_name="manage.project.change_role",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
has_translations=True,
require_reauth=True,
)
def change_project_role(project, request, _form_class=ChangeRoleForm):
form = _form_class(request.POST)
if form.validate():
role_id = request.POST["role_id"]
try:
role = (
request.db.query(Role)
.join(User)
.filter(Role.id == role_id, Role.project == project)
.one()
)
if role.role_name == "Owner" and role.user == request.user:
request.session.flash("Cannot remove yourself as Owner", queue="error")
else:
request.db.add(
JournalEntry(
name=project.name,
action="change {} {} to {}".format(
role.role_name, role.user.username, form.role_name.data
),
submitted_by=request.user,
)
)
role.role_name = form.role_name.data
project.record_event(
tag=EventTag.Project.RoleChange,
request=request,
additional={
"submitted_by": request.user.username,
"role_name": form.role_name.data,
"target_user": role.user.username,
},
)
role.user.record_event(
tag=EventTag.Account.RoleChange,
request=request,
additional={
"submitted_by": request.user.username,
"project_name": project.name,
"role_name": form.role_name.data,
},
)
owner_users = set(project_owners(request, project))
# Don't send owner notification email to new user
# if they are now an owner
owner_users.discard(role.user)
send_collaborator_role_changed_email(
request,
owner_users,
user=role.user,
submitter=request.user,
project_name=project.name,
role=role.role_name,
)
send_role_changed_as_collaborator_email(
request,
role.user,
submitter=request.user,
project_name=project.name,
role=role.role_name,
)
request.session.flash("Changed role", queue="success")
except NoResultFound:
request.session.flash("Could not find role", queue="error")
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
@view_config(
route_name="manage.project.delete_role",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
has_translations=True,
require_reauth=True,
)
def delete_project_role(project, request):
try:
role = (
request.db.query(Role)
.join(User)
.filter(Role.project == project)
.filter(Role.id == request.POST["role_id"])
.one()
)
projects_sole_owned = {
project.name for project in user_projects(request)["projects_sole_owned"]
}
removing_self = role.role_name == "Owner" and role.user == request.user
is_sole_owner = project.name in projects_sole_owned
if removing_self and is_sole_owner:
request.session.flash("Cannot remove yourself as Sole Owner", queue="error")
else:
request.db.delete(role)
request.db.add(
JournalEntry(
name=project.name,
action=f"remove {role.role_name} {role.user.username}",
submitted_by=request.user,
)
)
project.record_event(
tag=EventTag.Project.RoleRemove,
request=request,
additional={
"submitted_by": request.user.username,
"role_name": role.role_name,
"target_user": role.user.username,
},
)
owner_users = set(project_owners(request, project))
# Don't send owner notification email to new user
# if they are now an owner
owner_users.discard(role.user)
send_collaborator_removed_email(
request,
owner_users,
user=role.user,
submitter=request.user,
project_name=project.name,
)
send_removed_as_collaborator_email(
request, role.user, submitter=request.user, project_name=project.name
)
request.session.flash("Removed collaborator", queue="success")
if removing_self:
return HTTPSeeOther(request.route_path("manage.projects"))
except NoResultFound:
request.session.flash("Could not find role", queue="error")
return HTTPSeeOther(
request.route_path("manage.project.roles", project_name=project.name)
)
@view_config(
route_name="manage.project.history",
context=Project,
renderer="warehouse:templates/manage/project/history.html",
uses_session=True,
permission=Permissions.ProjectsRead,
has_translations=True,
)
def manage_project_history(project, request):
try:
page_num = int(request.params.get("page", 1))
except ValueError:
raise HTTPBadRequest("'page' must be an integer.")
project_events_query = (
request.db.query(Project.Event)
.join(Project.Event.source)
.filter(Project.Event.source_id == project.id)
)
file_events_query = (
request.db.query(File.Event)
.join(File.Event.source)
.filter(File.Event.additional["project_id"].astext == str(project.id))
)
events_query = project_events_query.union(file_events_query).order_by(
Project.Event.time.desc(), File.Event.time.desc()
)
events = SQLAlchemyORMPage(
events_query,
page=page_num,
items_per_page=25,
url_maker=paginate_url_factory(request),
)
if events.page_count and page_num > events.page_count:
raise HTTPNotFound
user_service = request.find_service(IUserService, context=None)
return {
"events": events,
"get_user": user_service.get_user,
"project": project,
}
@view_config(
route_name="manage.project.documentation",
context=Project,
renderer="warehouse:templates/manage/project/documentation.html",
uses_session=True,
permission=Permissions.ProjectsRead,
has_translations=True,
)
def manage_project_documentation(project, request):
return {"project": project}
@view_config(
route_name="manage.project.archive",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
)
def archive_project_view(project, request) -> HTTPSeeOther:
"""
Archive a Project. Reversible action.
"""
archive_project(project, request)
return HTTPSeeOther(
request.route_path("manage.project.settings", project_name=project.name)
)
@view_config(
route_name="manage.project.unarchive",
context=Project,
uses_session=True,
require_methods=["POST"],
permission=Permissions.ProjectsWrite,
)
def unarchive_project_view(project, request) -> HTTPSeeOther:
"""
Unarchive a Project. Reversible action.
"""
unarchive_project(project, request)
return HTTPSeeOther(
request.route_path("manage.project.settings", project_name=project.name)
)
| ManageProjectRelease |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/origin_info.py | {
"start": 5221,
"end": 9975
} | class ____(gast.NodeVisitor):
"""Annotates an AST with additional source information like file name."""
def __init__(self, root_node, source_lines, comments_map,
context_lineno, context_col_offset,
filepath):
self._source_lines = source_lines
self._comments_map = comments_map
if (hasattr(root_node, 'decorator_list') and root_node.decorator_list and
hasattr(root_node.decorator_list[0], 'lineno')):
# Typical case: functions. The line number of the first decorator
# is more accurate than the line number of the function itself in
# 3.8+. In earlier versions they coincide.
self._lineno_offset = context_lineno - root_node.decorator_list[0].lineno
else:
# Fall back to the line number of the root node.
self._lineno_offset = context_lineno - root_node.lineno
self._col_offset = context_col_offset - root_node.col_offset
self._filepath = filepath
self._function_stack = []
def _absolute_lineno(self, lineno):
return lineno + self._lineno_offset
def _absolute_col_offset(self, col_offset):
if col_offset is None:
return 0
return col_offset + self._col_offset
def _attach_origin_info(self, node):
lineno = getattr(node, 'lineno', None)
col_offset = getattr(node, 'col_offset', None)
if lineno is None:
return
if self._function_stack:
function_name = self._function_stack[-1].name
else:
function_name = None
source_code_line = self._source_lines[lineno - 1]
comment = self._comments_map.get(lineno)
loc = Location(self._filepath, self._absolute_lineno(lineno),
self._absolute_col_offset(col_offset))
origin = OriginInfo(loc, function_name, source_code_line, comment)
anno.setanno(node, 'lineno', lineno)
anno.setanno(node, anno.Basic.ORIGIN, origin)
def visit(self, node):
entered_function = False
if isinstance(node, gast.FunctionDef):
entered_function = True
self._function_stack.append(_Function(node.name))
self._attach_origin_info(node)
self.generic_visit(node)
if entered_function:
self._function_stack.pop()
def resolve(node, source, context_filepath, context_lineno, context_col_offset):
"""Adds origin information to an AST, based on the source it was loaded from.
This allows us to map the original source code line numbers to generated
source code.
Note: the AST may be a part of a larger context (e.g. a function is part of
a module that may contain other things). However, this function does not
assume the source argument contains the entire context, nor that it contains
only code corresponding to node itself. However, it assumes that node was
parsed from the given source code.
For this reason, two extra arguments are required, and they indicate the
location of the node in the original context.
Args:
node: gast.AST, the AST to annotate.
source: Text, the source code representing node.
context_filepath: Text
context_lineno: int
context_col_offset: int
"""
# TODO(mdan): Pull this to a separate utility.
code_reader = io.StringIO(source)
comments_map = {}
try:
for token in tokenize.generate_tokens(code_reader.readline):
tok_type, tok_string, loc, _, _ = token
srow, _ = loc
if tok_type == tokenize.COMMENT:
comments_map[srow] = tok_string.strip()[1:].strip()
except tokenize.TokenError:
if isinstance(node, gast.Lambda):
# Source code resolution in older Python versions is brittle for
# lambda functions, and may contain garbage.
pass
else:
raise
source_lines = source.split('\n')
visitor = OriginResolver(node, source_lines, comments_map,
context_lineno, context_col_offset,
context_filepath)
visitor.visit(node)
def resolve_entity(node, source, entity):
"""Like resolve, but extracts the context information from an entity."""
lines, lineno = tf_inspect.getsourcelines(entity)
filepath = tf_inspect.getsourcefile(entity)
# Poor man's attempt at guessing the column offset: count the leading
# whitespace. This might not work well with tabs.
definition_line = lines[0]
col_offset = len(definition_line) - len(definition_line.lstrip())
resolve(node, source, filepath, lineno, col_offset)
def copy_origin(from_node, to_node):
"""Copies the origin info from a node to another, recursively."""
origin = anno.Basic.ORIGIN.of(from_node, default=None)
if origin is None:
return
if not isinstance(to_node, (list, tuple)):
to_node = (to_node,)
for node in to_node:
for n in gast.walk(node):
anno.setanno(n, anno.Basic.ORIGIN, origin)
| OriginResolver |
python | ipython__ipython | IPython/core/interactiveshell.py | {
"start": 5964,
"end": 7661
} | class ____(types.ModuleType):
def __init__(self) -> None:
super().__init__(
"__main__",
doc="Automatically created module for the IPython interactive environment",
)
def make_main_module_type(user_ns: dict[str, Any]) -> type[_IPythonMainModuleBase]:
@undoc
class IPythonMainModule(_IPythonMainModuleBase):
"""
ModuleType that supports passing in a custom user namespace dictionary,
to be used for the module's __dict__. This is enabled by shadowing the
underlying __dict__ attribute of the module, and overriding getters and
setters to point to the custom user namespace dictionary.
The reason to do this is to allow the __main__ module to be an instance
of ModuleType, while still allowing the user namespace to be custom.
"""
@property
def __dict__(self) -> dict[str, Any]: # type: ignore[override]
return user_ns
def __setattr__(self, item: str, value: Any) -> None:
if item == "__dict__":
# Ignore this when IPython tries to set it, since we already provide it
return
user_ns[item] = value
def __getattr__(self, item: str) -> Any:
try:
return user_ns[item]
except KeyError:
raise AttributeError(f"module {self.__name__} has no attribute {item}")
def __delattr__(self, item: str) -> None:
try:
del user_ns[item]
except KeyError:
raise AttributeError(f"module {self.__name__} has no attribute {item}")
return IPythonMainModule
| _IPythonMainModuleBase |
python | ray-project__ray | python/ray/air/util/object_extensions/arrow.py | {
"start": 3348,
"end": 4358
} | class ____(pa.ExtensionArray):
"""Array class for ArrowPythonObjectType"""
def from_objects(
objects: typing.Union[np.ndarray, typing.Iterable[typing.Any]]
) -> "ArrowPythonObjectArray":
if isinstance(objects, np.ndarray):
objects = objects.tolist()
type_ = ArrowPythonObjectType()
all_dumped_bytes = []
for obj in objects:
dumped_bytes = pickle_dumps(
obj, "Error pickling object to convert to Arrow"
)
all_dumped_bytes.append(dumped_bytes)
arr = pa.array(all_dumped_bytes, type=type_.storage_type)
return type_.wrap_array(arr)
def to_numpy(
self, zero_copy_only: bool = False, writable: bool = False
) -> np.ndarray:
arr = np.empty(len(self), dtype=object)
arr[:] = self.to_pylist()
return arr
try:
pa.register_extension_type(ArrowPythonObjectType())
except pa.ArrowKeyError:
# Already registered
pass
| ArrowPythonObjectArray |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/unnecessaryIsInstance2.py | {
"start": 177,
"end": 632
} | class ____(BBase): ...
def func1(a: AFinal, b: BFinal):
# This should generate an error if reportUnnecessaryIsinstance is true.
if isinstance(a, BBase):
reveal_type(a)
# This should generate an error if reportUnnecessaryIsinstance is true.
if isinstance(a, BBase):
reveal_type(a)
def func2(a: ABase, b: BBase):
if isinstance(a, BBase):
reveal_type(a)
if isinstance(b, ABase):
reveal_type(b)
| BFinal |
python | doocs__leetcode | solution/2500-2599/2545.Sort the Students by Their Kth Score/Solution.py | {
"start": 0,
"end": 148
} | class ____:
def sortTheStudents(self, score: List[List[int]], k: int) -> List[List[int]]:
return sorted(score, key=lambda x: -x[k])
| Solution |
python | getlogbook__logbook | src/logbook/handlers.py | {
"start": 30309,
"end": 35184
} | class ____(FileHandler):
"""This handler rotates based on dates. It will name the file
after the filename you specify and the `date_format` pattern.
So for example if you configure your handler like this::
handler = TimedRotatingFileHandler("/var/log/foo.log", date_format="%Y-%m-%d")
The filenames for the logfiles will look like this:
.. code-block:: text
/var/log/foo-2010-01-10.log
/var/log/foo-2010-01-11.log
...
By default it will keep all these files around, if you want to limit
them, you can specify a `backup_count`.
You may supply an optional `rollover_format`. This allows you to specify
the format for the filenames of rolled-over files.
the format as
So for example if you configure your handler like this::
handler = TimedRotatingFileHandler(
"/var/log/foo.log",
date_format="%Y-%m-%d",
rollover_format="{basename}{ext}.{timestamp}",
)
The filenames for the logfiles will look like this::
.. code-block:: text
/var/log/foo.log.2010-01-10
/var/log/foo.log.2010-01-11
...
Finally, an optional argument `timed_filename_for_current` may be set to
false if you wish to have the current log file match the supplied filename
until it is rolled over
"""
def __init__(
self,
filename,
mode="a",
encoding="utf-8",
level=NOTSET,
format_string=None,
date_format="%Y-%m-%d",
backup_count=0,
filter=None,
bubble=False,
timed_filename_for_current=True,
rollover_format="{basename}-{timestamp}{ext}",
):
self.date_format = date_format
self.backup_count = backup_count
self.rollover_format = rollover_format
self.original_filename = filename
self.basename, self.ext = os.path.splitext(os.path.abspath(filename))
self.timed_filename_for_current = timed_filename_for_current
self._timestamp = self._get_timestamp(_datetime_factory())
if self.timed_filename_for_current:
filename = self.generate_timed_filename(self._timestamp)
elif os.path.exists(filename):
self._timestamp = self._get_timestamp(
datetime.fromtimestamp(os.stat(filename).st_mtime)
)
FileHandler.__init__(
self, filename, mode, encoding, level, format_string, True, filter, bubble
)
def _get_timestamp(self, datetime):
"""
Fetches a formatted string witha timestamp of the given datetime
"""
return datetime.strftime(self.date_format)
def generate_timed_filename(self, timestamp):
"""
Produces a filename that includes a timestamp in the format supplied
to the handler at init time.
"""
timed_filename = self.rollover_format.format(
basename=self.basename, timestamp=timestamp, ext=self.ext
)
return timed_filename
def files_to_delete(self):
"""Returns a list with the files that have to be deleted when
a rollover occours.
"""
directory = os.path.dirname(self._filename)
files = []
rollover_regex = re.compile(
self.rollover_format.format(
basename=re.escape(self.basename),
timestamp=".+",
ext=re.escape(self.ext),
)
)
for filename in os.listdir(directory):
filename = os.path.join(directory, filename)
if rollover_regex.match(filename):
files.append((os.path.getmtime(filename), filename))
files.sort()
if self.backup_count > 1:
return files[: -self.backup_count + 1]
else:
return files[:]
def perform_rollover(self, new_timestamp):
if self.stream is not None:
self.stream.close()
if not self.timed_filename_for_current and os.path.exists(self._filename):
filename = self.generate_timed_filename(self._timestamp)
os.rename(self._filename, filename)
if self.backup_count > 0:
for _, filename in self.files_to_delete():
os.remove(filename)
if self.timed_filename_for_current:
self._filename = self.generate_timed_filename(new_timestamp)
self._timestamp = new_timestamp
self._open("w")
def emit(self, record):
msg = self.format(record)
self.lock.acquire()
try:
new_timestamp = self._get_timestamp(record.time)
if new_timestamp != self._timestamp:
self.perform_rollover(new_timestamp)
self.write(self.encode(msg))
self.flush()
finally:
self.lock.release()
| TimedRotatingFileHandler |
python | openai__openai-python | src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py | {
"start": 332,
"end": 798
} | class ____(BaseModel):
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the fine-tuning job succeeded."""
data: Data
"""Event data payload."""
type: Literal["fine_tuning.job.succeeded"]
"""The type of the event. Always `fine_tuning.job.succeeded`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| FineTuningJobSucceededWebhookEvent |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor18.py | {
"start": 493,
"end": 887
} | class ____(Generic[_T1]):
def __new__(cls, *args, **kwargs) -> Self:
return super().__new__(cls, *args, **kwargs)
@overload
def __init__(self, arg: _T1) -> None: ...
@overload
def __init__(self: "ClassB[str]", arg: int) -> None: ...
def __init__(self, arg: int | ClassA | str) -> None:
pass
b1: ClassB[ClassA | str] = ClassB[str](32)
@dataclass
| ClassB |
python | ray-project__ray | release/train_tests/benchmark/runner.py | {
"start": 14534,
"end": 16112
} | class ____(TrainLoopRunner):
"""A simple runner that uses a PyTorch model, optimizer, and loss function."""
def _setup(self):
model = self.factory.get_model()
self.model = ray.train.torch.prepare_model(model)
self.loss_fn = self.factory.get_loss_fn()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
def _train_step(self, batch):
self.model.train()
input_batch, labels = batch
self.model.train()
self.optimizer.zero_grad()
out = self.model(input_batch)
loss = self.loss_fn(out, labels)
loss.backward()
self.optimizer.step()
def _validate_step(self, batch):
self.model.eval()
input_batch, labels = batch
with torch.no_grad():
out = self.model(input_batch)
loss = self.loss_fn(out, labels)
return loss
def _save_training_state(self, local_dir: str):
# Standard DDP checkpointing.
if ray.train.get_context().get_world_rank() == 0:
torch.save(self.model.state_dict(), os.path.join(local_dir, "model.pt"))
torch.save(
self.optimizer.state_dict(), os.path.join(local_dir, "optimizer.pt")
)
def _load_training_state(self, local_dir: str):
self.model.load_state_dict(
torch.load(os.path.join(local_dir, "model.pt"), map_location="cpu")
)
self.optimizer.load_state_dict(
torch.load(os.path.join(local_dir, "optimizer.pt"), map_location="cpu")
)
| VanillaTorchRunner |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 33390,
"end": 34329
} | class ____:
@mock.patch(HOOK_STR)
def test_execute(self, hook_mock):
op = DataplexCatalogDeleteEntryTypeOperator(
project_id=PROJECT_ID,
location=REGION,
entry_type_id=ENTRY_TYPE_NAME,
task_id="delete_task",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.wait_for_operation.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.delete_entry_type.assert_called_once_with(
project_id=PROJECT_ID,
location=REGION,
entry_type_id=ENTRY_TYPE_NAME,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexCatalogDeleteEntryTypeOperator |
python | numba__numba | numba/parfors/parfor.py | {
"start": 127198,
"end": 132634
} | class ____(ParforPassStates):
"""ParforFusionPass class is responsible for fusing parfors
"""
def run(self):
"""run parfor fusion pass"""
# simplify CFG of parfor body loops since nested parfors with extra
# jumps can be created with prange conversion
n_parfors = simplify_parfor_body_CFG(self.func_ir.blocks)
# simplify before fusion
simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"])
# need two rounds of copy propagation to enable fusion of long sequences
# of parfors like test_fuse_argmin (some PYTHONHASHSEED values since
# apply_copies_parfor depends on set order for creating dummy assigns)
simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"])
if self.options.fusion and n_parfors >= 2:
self.func_ir._definitions = build_definitions(self.func_ir.blocks)
self.array_analysis.equiv_sets = dict()
self.array_analysis.run(self.func_ir.blocks)
# Get parfor params to calculate reductions below.
_, parfors = get_parfor_params(self.func_ir.blocks,
self.options.fusion,
self.nested_fusion_info)
# Find reductions so that fusion can be disallowed if a
# subsequent parfor read a reduction variable.
for p in parfors:
p.redvars, p.reddict = get_parfor_reductions(self.func_ir,
p,
p.params,
self.calltypes)
# reorder statements to maximize fusion
# push non-parfors down
maximize_fusion(self.func_ir, self.func_ir.blocks, self.typemap,
up_direction=False)
dprint_func_ir(self.func_ir, "after maximize fusion down")
self.fuse_parfors(self.array_analysis,
self.func_ir.blocks,
self.func_ir,
self.typemap)
dprint_func_ir(self.func_ir, "after first fuse")
# push non-parfors up
maximize_fusion(self.func_ir, self.func_ir.blocks, self.typemap)
dprint_func_ir(self.func_ir, "after maximize fusion up")
# try fuse again after maximize
self.fuse_parfors(self.array_analysis,
self.func_ir.blocks,
self.func_ir,
self.typemap)
dprint_func_ir(self.func_ir, "after fusion")
# remove dead code after fusion to remove extra arrays and variables
simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"])
def fuse_parfors(self, array_analysis, blocks, func_ir, typemap):
for label, block in blocks.items():
equiv_set = array_analysis.get_equiv_set(label)
fusion_happened = True
while fusion_happened:
fusion_happened = False
new_body = []
i = 0
while i < len(block.body) - 1:
stmt = block.body[i]
next_stmt = block.body[i + 1]
if isinstance(stmt, Parfor) and isinstance(next_stmt, Parfor):
# we have to update equiv_set since they have changed due to
# variables being renamed before fusion.
equiv_set = array_analysis.get_equiv_set(label)
stmt.equiv_set = equiv_set
next_stmt.equiv_set = equiv_set
fused_node, fuse_report = try_fuse(equiv_set, stmt, next_stmt,
self.metadata["parfors"], func_ir, typemap)
# accumulate fusion reports
self.diagnostics.fusion_reports.append(fuse_report)
if fused_node is not None:
fusion_happened = True
self.diagnostics.fusion_info[stmt.id].extend([next_stmt.id])
new_body.append(fused_node)
self.fuse_recursive_parfor(fused_node, equiv_set, func_ir, typemap)
i += 2
continue
new_body.append(stmt)
if isinstance(stmt, Parfor):
self.fuse_recursive_parfor(stmt, equiv_set, func_ir, typemap)
i += 1
new_body.append(block.body[-1])
block.body = new_body
return
def fuse_recursive_parfor(self, parfor, equiv_set, func_ir, typemap):
blocks = wrap_parfor_blocks(parfor)
maximize_fusion(self.func_ir, blocks, self.typemap)
dprint_func_ir(self.func_ir, "after recursive maximize fusion down", blocks)
arr_analysis = array_analysis.ArrayAnalysis(self.typingctx, self.func_ir,
self.typemap, self.calltypes)
arr_analysis.run(blocks, equiv_set)
self.fuse_parfors(arr_analysis, blocks, func_ir, typemap)
unwrap_parfor_blocks(parfor)
| ParforFusionPass |
python | facebookresearch__faiss | tests/test_fast_scan_ivf.py | {
"start": 31554,
"end": 32756
} | class ____(unittest.TestCase):
IMPLEM = 12
def do_test(self, metric=faiss.METRIC_L2):
ds = datasets.SyntheticDataset(32, 750, 200, 100)
index = faiss.index_factory(ds.d, "IVF32,PQ16x4np", metric)
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
# find a reasonable radius
D, I = index.search(ds.get_queries(), 10)
radius = np.median(D[:, -1])
lims1, D1, I1 = index.range_search(ds.get_queries(), radius)
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = self.IMPLEM
lims2, D2, I2 = index2.range_search(ds.get_queries(), radius)
nmiss = 0
nextra = 0
for i in range(ds.nq):
ref = set(I1[lims1[i]: lims1[i + 1]])
new = set(I2[lims2[i]: lims2[i + 1]])
nmiss += len(ref - new)
nextra += len(new - ref)
# need some tolerance because the look-up tables are quantized
self.assertLess(nmiss, 10)
self.assertLess(nextra, 10)
def test_L2(self):
self.do_test()
def test_IP(self):
self.do_test(metric=faiss.METRIC_INNER_PRODUCT)
| TestRangeSearchImplem12 |
python | getsentry__sentry | src/sentry/eventtypes/security.py | {
"start": 141,
"end": 1142
} | class ____(BaseEvent):
def extract_metadata(self, data):
# Relay normalizes the message for security reports into the log entry
# field, so we grab the message from there.
# (https://github.com/getsentry/relay/pull/558)
message = strip(
get_path(data, "logentry", "formatted") or get_path(data, "logentry", "message")
)
return {"message": message}
def get_title(self, metadata):
# Due to a regression (https://github.com/getsentry/sentry/pull/19794)
# some events did not have message persisted but title. Because of this
# the title code has to take these into account.
return metadata.get("message") or metadata.get("title") or "<untitled>"
def get_location(self, metadata):
# Try to get location by preferring URI over origin. This covers
# all the cases below where CSP sets URI and others set origin.
return metadata.get("uri") or metadata.get("origin")
| SecurityEvent |
python | kamyu104__LeetCode-Solutions | Python/longest-increasing-path-in-a-matrix.py | {
"start": 65,
"end": 1633
} | class ____(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
if not matrix:
return 0
in_degree = [[0]*len(matrix[0]) for _ in xrange(len(matrix))]
for i in xrange(len(matrix)):
for j in xrange(len(matrix[0])):
for di, dj in directions:
ni, nj = i+di, j+dj
if not (0 <= ni < len(matrix) and
0 <= nj < len(matrix[0]) and
matrix[ni][nj] > matrix[i][j]):
continue
in_degree[i][j] += 1
q = []
for i in xrange(len(matrix)):
for j in xrange(len(matrix[0])):
if not in_degree[i][j]:
q.append((i, j))
result = 0
while q:
new_q = []
for i, j in q:
for di, dj in directions:
ni, nj = i+di, j+dj
if not (0 <= ni < len(matrix) and
0 <= nj < len(matrix[0]) and
matrix[i][j] > matrix[ni][nj]):
continue
in_degree[ni][nj] -= 1
if not in_degree[ni][nj]:
new_q.append((ni, nj))
q = new_q
result += 1
return result
# Time: O(m * n)
# Space: O(m * n)
# dfs + memoization solution
| Solution |
python | great-expectations__great_expectations | great_expectations/execution_engine/execution_engine.py | {
"start": 2298,
"end": 3295
} | class ____(DictDot):
"""
MetricComputationConfiguration is a "dataclass" object, which holds components required for metric computation.
""" # noqa: E501 # FIXME CoP
metric_configuration: MetricConfiguration
metric_fn: sa.func | F # type: ignore[valid-type] # FIXME CoP
metric_provider_kwargs: dict
compute_domain_kwargs: Optional[dict] = None
accessor_domain_kwargs: Optional[dict] = None
@override
def to_dict(self) -> dict:
"""Returns: this MetricComputationConfiguration as a Python dictionary
Returns:
(dict) representation of present object
"""
return asdict(self)
def to_json_dict(self) -> dict:
"""Returns: this MetricComputationConfiguration as a JSON dictionary
Returns:
(dict) representation of present object as JSON-compatible Python dictionary
"""
return convert_to_json_serializable(data=self.to_dict())
@dataclass
| MetricComputationConfiguration |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 33703,
"end": 34667
} | class ____(BaseModel):
"""
Asset serializer for responses.
"""
id: Annotated[int, Field(title="Id")]
name: Annotated[str, Field(title="Name")]
uri: Annotated[str, Field(title="Uri")]
group: Annotated[str, Field(title="Group")]
extra: Annotated[dict[str, JsonValue] | None, Field(title="Extra")] = None
created_at: Annotated[datetime, Field(title="Created At")]
updated_at: Annotated[datetime, Field(title="Updated At")]
scheduled_dags: Annotated[list[DagScheduleAssetReference], Field(title="Scheduled Dags")]
producing_tasks: Annotated[list[TaskOutletAssetReference], Field(title="Producing Tasks")]
consuming_tasks: Annotated[list[TaskInletAssetReference], Field(title="Consuming Tasks")]
aliases: Annotated[list[AssetAliasResponse], Field(title="Aliases")]
watchers: Annotated[list[AssetWatcherResponse], Field(title="Watchers")]
last_asset_event: LastAssetEventResponse | None = None
| AssetResponse |
python | huggingface__transformers | tests/models/rt_detr/test_modeling_rt_detr.py | {
"start": 9684,
"end": 26488
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (RTDetrModel, RTDetrForObjectDetection) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": RTDetrModel, "object-detection": RTDetrForObjectDetection}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
test_torch_exportable = True
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "RTDetrForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = RTDetrModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=RTDetrConfig,
has_text_modality=False,
common_properties=["hidden_size", "num_attention_heads"],
)
def test_config(self):
self.config_tester.run_common_tests()
def test_rt_detr_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_rt_detr_model(*config_and_inputs)
def test_rt_detr_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_rt_detr_object_detection_head_model(*config_and_inputs)
@unittest.skip(reason="RTDetr does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="RTDetr does not use test_inputs_embeds_matches_input_ids")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="RTDetr does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="RTDetr does not support input and output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="RTDetr does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions
self.assertEqual(len(attentions), self.model_tester.encoder_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions
self.assertEqual(len(attentions), self.model_tester.encoder_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[
self.model_tester.encoder_attention_heads,
self.model_tester.encoder_seq_length,
self.model_tester.encoder_seq_length,
],
)
out_len = len(outputs)
correct_outlen = 13
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes
if model_class.__name__ == "RTDetrForObjectDetection":
correct_outlen += 2
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[
self.model_tester.decoder_attention_heads,
self.model_tester.num_queries,
self.model_tester.num_queries,
],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.decoder_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.decoder_n_points,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
else:
# RTDetr should maintin encoder_hidden_states output
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions
self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[
self.model_tester.encoder_attention_heads,
self.model_tester.encoder_seq_length,
self.model_tester.encoder_seq_length,
],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[1].shape[-2:]),
[
self.model_tester.image_size // self.model_tester.feat_strides[-1],
self.model_tester.image_size // self.model_tester.feat_strides[-1],
],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
)
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.num_queries, self.model_tester.d_model],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
# we take the first output since last_hidden_state is the first item
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.backbone = "tf_mobilenetv3_small_075"
config.backbone_config = None
config.use_timm_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "RTDetrForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
def test_hf_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Load a pretrained HF checkpoint as backbone
config.backbone = "microsoft/resnet-18"
config.backbone_config = None
config.use_timm_backbone = False
config.use_pretrained_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "RTDetrForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
@parameterized.expand(["float32", "float16", "bfloat16"])
@require_torch_accelerator
@slow
def test_inference_with_different_dtypes(self, dtype_str):
dtype = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
}[dtype_str]
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device).to(dtype)
model.eval()
for key, tensor in inputs_dict.items():
if tensor.dtype == torch.float32:
inputs_dict[key] = tensor.to(dtype)
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class))
@parameterized.expand(["float32", "float16", "bfloat16"])
@require_torch_accelerator
@slow
def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str):
dtype = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
}[dtype_str]
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
h, w = inputs_dict["pixel_values"].shape[-2:]
# convert inputs to the desired dtype
for key, tensor in inputs_dict.items():
if tensor.dtype == torch.float32:
inputs_dict[key] = tensor.to(dtype)
for model_class in self.all_model_classes:
with tempfile.TemporaryDirectory() as tmpdirname:
model_class(config).save_pretrained(tmpdirname)
model_static = model_class.from_pretrained(
tmpdirname, anchor_image_size=[h, w], device_map=torch_device, dtype=dtype
).eval()
model_dynamic = model_class.from_pretrained(
tmpdirname, anchor_image_size=None, device_map=torch_device, dtype=dtype
).eval()
self.assertIsNotNone(model_static.config.anchor_image_size)
self.assertIsNone(model_dynamic.config.anchor_image_size)
with torch.no_grad():
outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class))
outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class))
self.assertTrue(
torch.allclose(
outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4
),
f"Max diff: {(outputs_static.last_hidden_state - outputs_dynamic.last_hidden_state).abs().max()}",
)
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
| RTDetrModelTest |
python | rq__rq | tests/test_registry.py | {
"start": 11660,
"end": 14579
} | class ____(RQTestCase):
def setUp(self):
super().setUp()
self.registry = DeferredJobRegistry(connection=self.connection)
def test_key(self):
self.assertEqual(self.registry.key, 'rq:deferred:default')
def test_add(self):
"""Adding a job to DeferredJobsRegistry."""
job = Job(connection=self.connection)
self.registry.add(job)
job_ids = [as_text(job_id) for job_id in self.connection.zrange(self.registry.key, 0, -1)]
self.assertEqual(job_ids, [job.id])
def test_add_with_deferred_ttl(self):
"""Job TTL defaults to +inf"""
queue = Queue(connection=self.connection)
job = queue.enqueue(say_hello)
key = self.registry.key
self.registry.add(job)
score = self.connection.zscore(key, job.id)
self.assertEqual(score, float('inf'))
timestamp = current_timestamp()
ttl = 5
self.registry.add(job, ttl=ttl)
score = self.connection.zscore(key, job.id)
self.assertLess(score, timestamp + ttl + 2)
self.assertGreater(score, timestamp + ttl - 2)
def test_register_dependency(self):
"""Ensure job creation and deletion works with DeferredJobRegistry."""
queue = Queue(connection=self.connection)
job = queue.enqueue(say_hello)
job2 = queue.enqueue(say_hello, depends_on=job)
registry = DeferredJobRegistry(connection=self.connection)
self.assertEqual(registry.get_job_ids(), [job2.id])
# When deleted, job removes itself from DeferredJobRegistry
job2.delete()
self.assertEqual(registry.get_job_ids(), [])
def test_cleanup_supports_deleted_jobs(self):
queue = Queue(connection=self.connection)
job = queue.enqueue(say_hello)
self.registry.add(job, ttl=10)
self.assertEqual(self.registry.count, 1)
job.delete(remove_from_queue=False)
self.assertEqual(self.registry.count, 1)
self.registry.cleanup(current_timestamp() + 100)
self.assertEqual(self.registry.count, 0)
def test_cleanup_moves_jobs_to_failed_job_registry(self):
"""Moving expired jobs to FailedJobRegistry."""
queue = Queue(connection=self.connection)
failed_job_registry = FailedJobRegistry(connection=self.connection)
job = queue.enqueue(say_hello)
self.connection.zadd(self.registry.key, {job.id: 2})
# Job has not been moved to FailedJobRegistry
self.registry.cleanup(1)
self.assertNotIn(job, failed_job_registry)
self.assertIn(job, self.registry)
self.registry.cleanup()
self.assertIn(job.id, failed_job_registry)
self.assertNotIn(job, self.registry)
job.refresh()
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertTrue(job.exc_info) # explanation is written to exc_info
| TestDeferredRegistry |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 91848,
"end": 92104
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_AUDIO_TOKENIZATION_MAPPING
AutoModelForAudioTokenization = auto_class_update(
AutoModelForAudioTokenization, head_doc="audio tokenization through codebooks"
)
| AutoModelForAudioTokenization |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1571815,
"end": 1573816
} | class ____(TopLevelParameter):
"""
VariableParameter schema wrapper.
Parameters
----------
name : str, :class:`ParameterName`
A unique name for the variable parameter. Parameter names should be valid JavaScript
identifiers: they should contain only alphanumeric characters (or "$", or "_") and
may not start with a digit. Reserved keywords that may not be used as parameter
names are "datum", "event", "item", and "parent".
bind : dict, :class:`Binding`, :class:`BindInput`, :class:`BindRange`, :class:`BindDirect`, :class:`BindCheckbox`, :class:`BindRadioSelect`
Binds the parameter to an external input element such as a slider, selection list or
radio button group.
expr : str, :class:`Expr`
An expression for the value of the parameter. This expression may include other
parameters, in which case the parameter will automatically update in response to
upstream parameter changes.
react : bool
A boolean flag (default ``true``) indicating if the update expression should be
automatically re-evaluated when any upstream signal dependencies update. If
``false``, the update expression will not register any dependencies on other
signals, even for initialization.
**Default value:** ``true``
value : Any
The `initial value <http://vega.github.io/vega-lite/docs/value.html>`__ of the
parameter.
**Default value:** ``undefined``
"""
_schema = {"$ref": "#/definitions/VariableParameter"}
def __init__(
self,
name: Optional[str | SchemaBase] = Undefined,
bind: Optional[SchemaBase | Map] = Undefined,
expr: Optional[str | SchemaBase] = Undefined,
react: Optional[bool] = Undefined,
value: Optional[Any] = Undefined,
**kwds,
):
super().__init__(
name=name, bind=bind, expr=expr, react=react, value=value, **kwds
)
| VariableParameter |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/list_files_test.py | {
"start": 9115,
"end": 10954
} | class ____(ListFilesTest, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
repetitions=[1, 2],
seed=[None, 42],
reshuffle_each_iteration=[True, False])))
def test(
self,
repetitions: int,
seed: Optional[int],
reshuffle_each_iteration: bool):
filenames = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'),
shuffle=False)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
if repetitions > 1:
dataset = dataset.repeat(repetitions)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
expected = [
compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames
] * repetitions
dataset_output = self.getDatasetOutput(
dataset, requires_initialization=True)
self.assertCountEqual(dataset_output, expected)
self.assertNotEqual(dataset_output, expected)
self.assertLen(dataset_output, self.evaluate(dataset.cardinality()))
@combinations.generate(test_base.default_test_combinations())
def testShuffleNotSupported(self):
filenames = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=True)
with self.assertRaises(errors.FailedPreconditionError):
dataset = global_shuffle_op._global_shuffle(dataset)
self.getDatasetOutput(dataset, requires_initialization=True)
| ListFilesGlobalShuffleTest |
python | Netflix__metaflow | metaflow/plugins/datatools/local.py | {
"start": 174,
"end": 258
} | class ____(MetaflowException):
headline = "Invalid path"
| MetaflowLocalURLException |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 10611,
"end": 11303
} | class ____(DistanceLookupBase):
def as_sql(self, compiler, connection):
spheroid = (
len(self.rhs_params) == 2 and self.rhs_params[-1] == "spheroid"
) or None
distance_expr = connection.ops.distance_expr_for_lookup(
self.lhs, self.rhs, spheroid=spheroid
)
sql, params = compiler.compile(distance_expr.resolve_expression(compiler.query))
dist_sql, dist_params = self.process_distance(compiler, connection)
return (
"%(func)s %(op)s %(dist)s" % {"func": sql, "op": self.op, "dist": dist_sql},
(*params, *dist_params),
)
@BaseSpatialField.register_lookup
| DistanceLookupFromFunction |
python | pytorch__pytorch | test/package/package_a/fake_interface.py | {
"start": 800,
"end": 1064
} | class ____(torch.nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.one(input, input)
| UsesInterface |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 1512,
"end": 1608
} | class ____(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
| Parent |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/polling.py | {
"start": 4230,
"end": 4875
} | class ____(BaseObserver):
"""
File system independent observer that polls a directory to detect changes.
"""
def __init__(self, stat, listdir, polling_interval=1):
"""
:param stat: stat function. See ``os.stat`` for details.
:param listdir: listdir function. See ``os.listdir`` for details.
:type polling_interval: float
:param polling_interval: interval in seconds between polling the file system.
"""
emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
| PollingObserverVFS |
python | ray-project__ray | doc/source/serve/doc_code/multiplexed.py | {
"start": 1503,
"end": 1877
} | class ____:
def __init__(self, downstream: DeploymentHandle):
self._h = downstream
async def __call__(self, request: starlette.requests.Request):
return await self._h.options(multiplexed_model_id="bar").remote()
serve.run(Upstream.bind(Downstream.bind()))
resp = requests.get("http://localhost:8000")
# __serve_model_composition_example_end__
| Upstream |
python | PrefectHQ__prefect | tests/test_task_worker.py | {
"start": 15448,
"end": 16225
} | class ____:
async def test_task_run_via_task_worker_respects_tags(
self, async_foo_task, prefect_client, events_pipeline
):
@task(tags=["foo", "bar"])
async def task_with_tags(x):
return x
task_worker = TaskWorker(task_with_tags)
task_run_future = task_with_tags.apply_async((42,))
task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
await task_worker.execute_task_run(task_run)
await events_pipeline.process_events()
updated_task_run = await prefect_client.read_task_run(
task_run_future.task_run_id
)
assert updated_task_run.state.is_completed()
assert {"foo", "bar"} == set(updated_task_run.tags)
| TestTaskWorkerTaskTags |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ActorDefinitionResourceRequirements.py | {
"start": 1022,
"end": 1379
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
default: Optional[ResourceRequirements] = Field(
None,
description="if set, these are the requirements that should be set for ALL jobs run for this actor definition.",
)
jobSpecific: Optional[List[JobTypeResourceLimit]] = None
| ActorDefinitionResourceRequirements |
python | numba__numba | numba/tests/test_numpy_support.py | {
"start": 5380,
"end": 7393
} | class ____(object):
"""
Common tests for the typing of values. Also used by test_special.
"""
def check_number_values(self, func):
"""
Test *func*() with scalar numeric values.
"""
f = func
# Standard Python types get inferred by numpy
self.assertIn(f(1), (types.int32, types.int64))
self.assertIn(f(2**31 - 1), (types.int32, types.int64))
self.assertIn(f(-2**31), (types.int32, types.int64))
self.assertIs(f(1.0), types.float64)
self.assertIs(f(1.0j), types.complex128)
self.assertIs(f(True), types.bool_)
self.assertIs(f(False), types.bool_)
# Numpy scalar types get converted by from_dtype()
for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
'int64', 'uint64', 'intc', 'uintc', 'intp', 'uintp',
'float32', 'float64', 'complex64', 'complex128',
'bool_'):
val = getattr(np, name)()
self.assertIs(f(val), getattr(types, name))
def _base_check_datetime_values(self, func, np_type, nb_type):
f = func
for unit in [
'', 'Y', 'M', 'D', 'h', 'm', 's',
'ms', 'us', 'ns', 'ps', 'fs', 'as',
]:
if unit:
t = np_type(3, unit)
else:
# "generic" datetime / timedelta
t = np_type('Nat')
tp = f(t)
# This ensures the unit hasn't been lost
self.assertEqual(tp, nb_type(unit))
def check_datetime_values(self, func):
"""
Test *func*() with np.datetime64 values.
"""
self._base_check_datetime_values(func, np.datetime64, types.NPDatetime)
def check_timedelta_values(self, func):
"""
Test *func*() with np.timedelta64 values.
"""
self._base_check_datetime_values(func, np.timedelta64,
types.NPTimedelta)
| ValueTypingTestBase |
python | chroma-core__chroma | chromadb/errors.py | {
"start": 2702,
"end": 2889
} | class ____(ChromaError):
@overrides
def code(self) -> int:
return 500
@classmethod
@overrides
def name(cls) -> str:
return "InternalError"
| InternalError |
python | django-import-export__django-import-export | tests/core/tests/test_mixins.py | {
"start": 4145,
"end": 5024
} | class ____(TestCase):
def test_get_import_formats(self):
class Format:
def __init__(self, id, can_import):
self.id = id
self.val = can_import
def can_import(self):
return self.val
class CanImportFormat(Format):
def __init__(self):
super().__init__(1, True)
class CannotImportFormat(Format):
def __init__(self):
super().__init__(2, False)
class TestBaseImportMixin(mixins.BaseImportMixin):
@property
def import_formats(self):
return [CanImportFormat, CannotImportFormat]
m = TestBaseImportMixin()
formats = m.get_import_formats()
self.assertEqual(1, len(formats))
self.assertEqual("CanImportFormat", formats[0].__name__)
| BaseImportMixinTest |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 15489,
"end": 15545
} | class ____(FacetField, DateField):
pass
| FacetDateField |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 73003,
"end": 73145
} | class ____(DjangoImageField):
default_validators = [ext_validator]
def to_python(self, value):
return value
| PassImageValidation |
python | huggingface__transformers | src/transformers/generation/continuous_batching/scheduler.py | {
"start": 13179,
"end": 16245
} | class ____(Scheduler):
"""Scheduler that prioritizes split prefill requests over decoding requests. This scheduler ensures that split
prefill requests (which are continuations of partially processed prompts) are completed before processing new
decoding requests."""
@traced
def schedule_batch(self, token_budget: int) -> list[RequestState]:
priority_states: list[RequestState] = []
second_priority_states: list[RequestState] = []
scheduled_requests = []
for state in self.active_requests.values():
# XXX: when cache is full, state can stay on `PREFILLING_SPLIT` so we need to take those into account
if state.status in [RequestStatus.PREFILLING_SPLIT, RequestStatus.SPLIT_PENDING_REMAINDER]:
priority_states.append(state)
elif state.status == RequestStatus.DECODING:
second_priority_states.append(state)
# Add waiting requests to second priority
for req_id in self.waiting_requests_order:
second_priority_states.append(self.waiting_requests[req_id])
candidates = priority_states + second_priority_states
request_ids_to_remove_from_waiting = set()
for state in candidates:
self._prepare_request_for_processing(state, token_budget, request_ids_to_remove_from_waiting)
request_len = len(state.tokens_to_process)
# If we can't allocate blocks, do not schedule the request and break if the cache is full
if not self._allocate_blocks_if_needed(state):
if self.cache.get_num_free_blocks() == 0:
break
continue
# Add the request to the scheduled requests
scheduled_requests.append(state)
# Update the token budget
token_budget -= request_len
# If using prefix sharing, we make note of the blocks that will be computed in the forward pass
if self.cache.use_prefix_sharing:
tokens_in_current_block = state.current_len() % self.cache.block_size
tokens_after_forward = tokens_in_current_block + request_len
complete_blocks = tokens_after_forward // self.cache.block_size
self.cache.blocks_to_complete[state.request_id] = complete_blocks
# Remove the request from the waiting queue and mark it as removed
req_id = state.request_id
if req_id in self.waiting_requests:
del self.waiting_requests[req_id]
request_ids_to_remove_from_waiting.add(req_id)
# Early exit of the loop if we have no token budget left
if token_budget == 0:
break
self.waiting_requests_order = deque(
[req_id for req_id in self.waiting_requests_order if req_id not in request_ids_to_remove_from_waiting]
)
return scheduled_requests
SCHEDULER_MAPPING = {
"fifo": FIFOScheduler,
"prefill_first": PrefillFirstScheduler,
}
| PrefillFirstScheduler |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 5353,
"end": 5819
} | class ____:
"""Test uk_UA bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{27}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == UkUaBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{27}", iban[2:])
| TestUkUa |
python | kamyu104__LeetCode-Solutions | Python/maximum-score-of-spliced-array.py | {
"start": 58,
"end": 601
} | class ____(object):
def maximumsSplicedArray(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
def kadane(a):
result = curr = 0
for x in a:
curr = max(curr+x, 0)
result = max(result, curr)
return result
return max(sum(nums1)+kadane((nums2[i]-nums1[i] for i in xrange(len(nums1)))),
sum(nums2)+kadane((nums1[i]-nums2[i] for i in xrange(len(nums2)))))
| Solution |
python | huggingface__transformers | src/transformers/models/blt/modular_blt.py | {
"start": 23012,
"end": 25217
} | class ____(BltPreTrainedModel):
config: BltGlobalTransformerConfig
_can_record_outputs = {
"global_attentions": OutputRecorder(BltSelfAttention, index=1, layer_name="global_transformer"),
}
def __init__(self, config: BltGlobalTransformerConfig):
super().__init__(config)
self.config = config
self.layers = nn.ModuleList()
for layer_idx in range(config.num_hidden_layers):
self.layers.append(BltTransformerLayer(config, layer_idx))
self.rotary_emb = BltRotaryEmbedding(config=config)
# Create token embedding projection (use nn.Identity() when no projection needed)
if getattr(config, "encoder_cross_output_size", None) is not None:
self.token_embedding_projection = nn.Linear(
config.encoder_cross_output_size, config.hidden_size, bias=False
)
else:
self.token_embedding_projection = nn.Identity()
self.post_init()
def forward(
self,
input_embeds: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
):
batch_size, seq_len, _ = input_embeds.shape
hidden_states = self.token_embedding_projection(input_embeds)
hidden_states = F.dropout(hidden_states, p=self.config.dropout, training=self.training)
if position_ids is None:
position_ids = (
torch.arange(input_embeds.shape[1], device=input_embeds.device).unsqueeze(0).expand(batch_size, -1)
)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for i, layer in enumerate(self.layers):
hidden_states = layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return hidden_states
| BltGlobalTransformer |
python | altair-viz__altair | altair/utils/plugin_registry.py | {
"start": 1291,
"end": 2324
} | class ____(Generic[PluginT, R]):
"""
Context manager for enabling plugins.
This object lets you use enable() as a context manager to
temporarily enable a given plugin::
with plugins.enable("name"):
do_something() # 'name' plugin temporarily enabled
# plugins back to original state
"""
def __init__(
self, registry: PluginRegistry[PluginT, R], name: str, **options: Any
) -> None:
self.registry: PluginRegistry[PluginT, R] = registry
self.name: str = name
self.options: dict[str, Any] = options
self.original_state: dict[str, Any] = registry._get_state()
self.registry._enable(name, **options)
def __enter__(self) -> PluginEnabler[PluginT, R]:
return self
def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> None:
self.registry._set_state(self.original_state)
def __repr__(self) -> str:
return f"{type(self.registry).__name__}.enable({self.name!r})"
| PluginEnabler |
python | openai__openai-python | src/openai/types/shared_params/response_format_json_schema.py | {
"start": 272,
"end": 1239
} | class ____(TypedDict, total=False):
name: Required[str]
"""The name of the response format.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
of 64.
"""
description: str
"""
A description of what the response format is for, used by the model to determine
how to respond in the format.
"""
schema: Dict[str, object]
"""
The schema for the response format, described as a JSON Schema object. Learn how
to build JSON schemas [here](https://json-schema.org/).
"""
strict: Optional[bool]
"""
Whether to enable strict schema adherence when generating the output. If set to
true, the model will always follow the exact schema defined in the `schema`
field. Only a subset of JSON Schema is supported when `strict` is `true`. To
learn more, read the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
"""
| JSONSchema |
python | falconry__falcon | falcon/middleware.py | {
"start": 365,
"end": 7331
} | class ____(UniversalMiddlewareWithProcessResponse):
"""CORS Middleware.
This middleware provides a simple out-of-the box CORS policy, including handling
of preflighted requests from the browser.
See also:
* https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
* https://www.w3.org/TR/cors/#resource-processing-model
Note:
Falcon will automatically add OPTIONS responders if they are missing from the
responder instances added to the routes. When providing a custom ``on_options``
method, the ``Allow`` headers in the response should be set to the allowed
method values. If the ``Allow`` header is missing from the response,
this middleware will deny the preflight request.
This is also valid when using a sink function.
Keyword Arguments:
allow_origins (Union[str, Iterable[str]]): List of origins to allow (case
sensitive). The string ``'*'`` acts as a wildcard, matching every origin.
(default ``'*'``).
expose_headers (Optional[Union[str, Iterable[str]]]): List of additional
response headers to expose via the ``Access-Control-Expose-Headers``
header. These headers are in addition to the CORS-safelisted ones:
``Cache-Control``, ``Content-Language``, ``Content-Length``,
``Content-Type``, ``Expires``, ``Last-Modified``, ``Pragma``.
(default ``None``).
See also:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers
allow_credentials (Optional[Union[str, Iterable[str]]]): List of origins
(case sensitive) for which to allow credentials via the
``Access-Control-Allow-Credentials`` header.
The string ``'*'`` acts as a wildcard, matching every allowed origin,
while ``None`` disallows all origins. This parameter takes effect only
if the origin is allowed by the ``allow_origins`` argument.
(default ``None``).
allow_private_network (bool):
If ``True``, the server includes the
``Access-Control-Allow-Private-Network`` header in responses to
CORS preflight (OPTIONS) requests. This indicates that the resource is
willing to respond to requests from less-public IP address spaces
(e.g., from public site to private device).
(default ``False``).
See also:
https://wicg.github.io/private-network-access/#private-network-request-heading
"""
def __init__(
self,
allow_origins: str | Iterable[str] = '*',
expose_headers: str | Iterable[str] | None = None,
allow_credentials: str | Iterable[str] | None = None,
allow_private_network: bool = False,
):
if allow_origins == '*':
self.allow_origins = allow_origins
else:
if isinstance(allow_origins, str):
allow_origins = [allow_origins]
self.allow_origins = frozenset(allow_origins)
if '*' in self.allow_origins:
raise ValueError(
'The wildcard string "*" may only be passed to allow_origins as a '
'string literal, not inside an iterable.'
)
if expose_headers is not None and not isinstance(expose_headers, str):
expose_headers = ', '.join(expose_headers)
self.expose_headers = expose_headers
if allow_credentials is None:
allow_credentials = frozenset()
elif allow_credentials != '*':
if isinstance(allow_credentials, str):
allow_credentials = [allow_credentials]
allow_credentials = frozenset(allow_credentials)
if '*' in allow_credentials:
raise ValueError(
'The wildcard string "*" may only be passed to allow_credentials '
'as a string literal, not inside an iterable.'
)
self.allow_credentials = allow_credentials
self.allow_private_network = allow_private_network
def process_response(
self, req: Request, resp: Response, resource: object, req_succeeded: bool
) -> None:
"""Implement the CORS policy for all routes.
This middleware provides a simple out-of-the box CORS policy,
including handling of preflighted requests from the browser.
See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
See also: https://www.w3.org/TR/cors/#resource-processing-model
"""
origin = req.get_header('Origin')
if origin is None:
return
if self.allow_origins != '*' and origin not in self.allow_origins:
return
if resp.get_header('Access-Control-Allow-Origin') is None:
set_origin = '*' if self.allow_origins == '*' else origin
if self.allow_credentials == '*' or origin in self.allow_credentials:
set_origin = origin
resp.set_header('Access-Control-Allow-Credentials', 'true')
resp.set_header('Access-Control-Allow-Origin', set_origin)
if self.expose_headers:
resp.set_header('Access-Control-Expose-Headers', self.expose_headers)
if (
req_succeeded
and req.method == 'OPTIONS'
and req.get_header('Access-Control-Request-Method')
):
# NOTE(kgriffs): This is a CORS preflight request. Patch the
# response accordingly.
allow = resp.get_header('Allow')
resp.delete_header('Allow')
allow_headers = req.get_header(
'Access-Control-Request-Headers', default='*'
)
if allow is None:
# there is no allow set, remove all access control headers
resp.delete_header('Access-Control-Allow-Methods')
resp.delete_header('Access-Control-Allow-Headers')
resp.delete_header('Access-Control-Max-Age')
resp.delete_header('Access-Control-Expose-Headers')
resp.delete_header('Access-Control-Allow-Origin')
else:
resp.set_header('Access-Control-Allow-Methods', allow)
resp.set_header('Access-Control-Allow-Headers', allow_headers)
resp.set_header('Access-Control-Max-Age', '86400') # 24 hours
if self.allow_private_network and (
req.get_header('Access-Control-Request-Private-Network') == 'true'
):
resp.set_header('Access-Control-Allow-Private-Network', 'true')
async def process_response_async(
self,
req: AsgiRequest,
resp: AsgiResponse,
resource: object,
req_succeeded: bool,
) -> None:
self.process_response(req, resp, resource, req_succeeded)
| CORSMiddleware |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 2661,
"end": 12733
} | class ____(Configurable):
"""Main prefilter component.
The IPython prefilter is run on all user input before it is run. The
prefilter consumes lines of input and produces transformed lines of
input.
The implementation consists of two phases:
1. Transformers
2. Checkers and handlers
Over time, we plan on deprecating the checkers and handlers and doing
everything in the transformers.
The transformers are instances of :class:`PrefilterTransformer` and have
a single method :meth:`transform` that takes a line and returns a
transformed line. The transformation can be accomplished using any
tool, but our current ones use regular expressions for speed.
After all the transformers have been run, the line is fed to the checkers,
which are instances of :class:`PrefilterChecker`. The line is passed to
the :meth:`check` method, which either returns `None` or a
:class:`PrefilterHandler` instance. If `None` is returned, the other
checkers are tried. If an :class:`PrefilterHandler` instance is returned,
the line is passed to the :meth:`handle` method of the returned
handler and no further checkers are tried.
Both transformers and checkers have a `priority` attribute, that determines
the order in which they are called. Smaller priorities are tried first.
Both transformers and checkers also have `enabled` attribute, which is
a boolean that determines if the instance is used.
Users or developers can change the priority or enabled attribute of
transformers or checkers, but they must call the :meth:`sort_checkers`
or :meth:`sort_transformers` method after changing the priority.
"""
multi_line_specials = Bool(True).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(PrefilterManager, self).__init__(shell=shell, **kwargs)
self.shell = shell
self._transformers = []
self.init_handlers()
self.init_checkers()
#-------------------------------------------------------------------------
# API for managing transformers
#-------------------------------------------------------------------------
def sort_transformers(self):
"""Sort the transformers by priority.
This must be called after the priority of a transformer is changed.
The :meth:`register_transformer` method calls this automatically.
"""
self._transformers.sort(key=lambda x: x.priority)
@property
def transformers(self):
"""Return a list of checkers, sorted by priority."""
return self._transformers
def register_transformer(self, transformer):
"""Register a transformer instance."""
if transformer not in self._transformers:
self._transformers.append(transformer)
self.sort_transformers()
def unregister_transformer(self, transformer):
"""Unregister a transformer instance."""
if transformer in self._transformers:
self._transformers.remove(transformer)
#-------------------------------------------------------------------------
# API for managing checkers
#-------------------------------------------------------------------------
def init_checkers(self):
"""Create the default checkers."""
self._checkers = []
for checker in _default_checkers:
checker(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_checkers(self):
"""Sort the checkers by priority.
This must be called after the priority of a checker is changed.
The :meth:`register_checker` method calls this automatically.
"""
self._checkers.sort(key=lambda x: x.priority)
@property
def checkers(self):
"""Return a list of checkers, sorted by priority."""
return self._checkers
def register_checker(self, checker):
"""Register a checker instance."""
if checker not in self._checkers:
self._checkers.append(checker)
self.sort_checkers()
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker)
#-------------------------------------------------------------------------
# API for managing handlers
#-------------------------------------------------------------------------
def init_handlers(self):
"""Create the default handlers."""
self._handlers = {}
self._esc_handlers = {}
for handler in _default_handlers:
handler(
shell=self.shell, prefilter_manager=self, parent=self
)
@property
def handlers(self):
"""Return a dict of all the handlers."""
return self._handlers
def register_handler(self, name, handler, esc_strings):
"""Register a handler instance by name with esc_strings."""
self._handlers[name] = handler
for esc_str in esc_strings:
self._esc_handlers[esc_str] = handler
def unregister_handler(self, name, handler, esc_strings):
"""Unregister a handler instance by name with esc_strings."""
try:
del self._handlers[name]
except KeyError:
pass
for esc_str in esc_strings:
h = self._esc_handlers.get(esc_str)
if h is handler:
del self._esc_handlers[esc_str]
def get_handler_by_name(self, name):
"""Get a handler by its name."""
return self._handlers.get(name)
def get_handler_by_esc(self, esc_str):
"""Get a handler by its escape string."""
return self._esc_handlers.get(esc_str)
#-------------------------------------------------------------------------
# Main prefiltering API
#-------------------------------------------------------------------------
def prefilter_line_info(self, line_info):
"""Prefilter a line that has been converted to a LineInfo object.
This implements the checker/handler part of the prefilter pipe.
"""
# print("prefilter_line_info: ", line_info)
handler = self.find_handler(line_info)
return handler.handle(line_info)
def find_handler(self, line_info):
"""Find a handler for the line_info by trying checkers."""
for checker in self.checkers:
if checker.enabled:
handler = checker.check(line_info)
if handler:
return handler
return self.get_handler_by_name('normal')
def transform_line(self, line, continue_prompt):
"""Calls the enabled transformers in order of increasing priority."""
for transformer in self.transformers:
if transformer.enabled:
line = transformer.transform(line, continue_prompt)
return line
def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print("prefilter_line: ", line, continue_prompt)
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return ''
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt)
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
return normal_handler.handle(line_info)
# special handlers are only allowed for single line statements
if continue_prompt and not self.multi_line_specials:
return normal_handler.handle(line_info)
prefiltered = self.prefilter_line_info(line_info)
# print("prefiltered line: %r" % prefiltered)
return prefiltered
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out
#-----------------------------------------------------------------------------
# Prefilter transformers
#-----------------------------------------------------------------------------
| PrefilterManager |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 31594,
"end": 32795
} | class ____(Response):
"""
Response of queues.delete_metadata endpoint.
:param updated: Number of queues updated (0 or 1)
:type updated: int
"""
_service = "queues"
_action = "delete_metadata"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of queues updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(DeleteMetadataResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| DeleteMetadataResponse |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 822550,
"end": 823081
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("membership_types", "reason")
membership_types = sgqlc.types.Field(
sgqlc.types.list_of(
sgqlc.types.non_null(OrgRemoveMemberAuditEntryMembershipType)
),
graphql_name="membershipTypes",
)
reason = sgqlc.types.Field(OrgRemoveMemberAuditEntryReason, graphql_name="reason")
| OrgRemoveMemberAuditEntry |
python | jschneier__django-storages | tests/test_ftp.py | {
"start": 8144,
"end": 9634
} | class ____(TestCase):
def setUp(self):
self.storage = ftp.FTPStorage(location=URL)
@patch("ftplib.FTP", **{"return_value.retrlines": list_retrlines})
def test_size(self, mock_ftp):
file_ = ftp.FTPStorageFile("fi", self.storage, "wb")
self.assertEqual(file_.size, 1024)
@patch("ftplib.FTP", **{"return_value.pwd.return_value": "foo"})
@patch("storages.backends.ftp.FTPStorage._read", return_value=io.BytesIO(b"foo"))
def test_readlines(self, mock_ftp, mock_storage):
file_ = ftp.FTPStorageFile("fi", self.storage, "wb")
self.assertEqual([b"foo"], file_.readlines())
@patch("ftplib.FTP", **{"return_value.pwd.return_value": "foo"})
@patch("storages.backends.ftp.FTPStorage._read", return_value=io.BytesIO(b"foo"))
def test_read(self, mock_ftp, mock_storage):
file_ = ftp.FTPStorageFile("fi", self.storage, "wb")
self.assertEqual(b"foo", file_.read())
def test_write(self):
file_ = ftp.FTPStorageFile("fi", self.storage, "wb")
file_.write(b"foo")
file_.seek(0)
self.assertEqual(file_.file.read(), b"foo")
@patch("ftplib.FTP", **{"return_value.pwd.return_value": "foo"})
@patch("storages.backends.ftp.FTPStorage._read", return_value=io.BytesIO(b"foo"))
def test_close(self, mock_ftp, mock_storage):
file_ = ftp.FTPStorageFile("fi", self.storage, "wb")
file_.is_dirty = True
file_.read()
file_.close()
| FTPStorageFileTest |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 26747,
"end": 30371
} | class ____(BoringModel):
def __init__(self, my_path, any_param=123):
super().__init__()
self.save_hyperparameters()
def test_model_with_fsspec_as_parameter(tmp_path):
model = UnsafeParamModel(LocalFileSystem(tmp_path))
trainer = Trainer(
default_root_dir=tmp_path, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, max_epochs=1
)
weights_only = False if _TORCH_GREATER_EQUAL_2_6 else None
trainer.fit(model, weights_only=weights_only)
trainer.test(weights_only=weights_only)
@pytest.mark.xfail(
# AttributeError: 'OrphanPath' object has no attribute 'exists'
# Issue with `importlib_resources>=6.2.0`
raises=AttributeError,
condition=(not _PYTHON_GREATER_EQUAL_3_9_0),
reason="Issue with `importlib_resources`",
strict=False,
)
@pytest.mark.skipif(RequirementCache("hydra-core<1.1"), reason="Requires Hydra's Compose API")
def test_model_save_hyper_parameters_interpolation_with_hydra(tmp_path):
"""This test relies on configuration saved under tests/models/conf/config.yaml."""
from hydra import compose, initialize
class TestHydraModel(BoringModel):
def __init__(self, args_0, args_1, args_2, kwarg_1=None):
self.save_hyperparameters()
assert self.hparams.args_0.log == "Something"
assert self.hparams.args_1["cfg"].log == "Something"
assert self.hparams.args_2[0].log == "Something"
assert self.hparams.kwarg_1["cfg"][0].log == "Something"
super().__init__()
with initialize(config_path="conf"):
args_0 = compose(config_name="config")
args_1 = {"cfg": compose(config_name="config")}
args_2 = [compose(config_name="config")]
kwarg_1 = {"cfg": [compose(config_name="config")]}
model = TestHydraModel(args_0, args_1, args_2, kwarg_1=kwarg_1)
epochs = 2
checkpoint_callback = ModelCheckpoint(monitor=None, dirpath=tmp_path, save_top_k=-1)
trainer = Trainer(
default_root_dir=tmp_path,
callbacks=[checkpoint_callback],
limit_train_batches=10,
limit_val_batches=10,
max_epochs=epochs,
logger=False,
)
trainer.fit(model)
_ = TestHydraModel.load_from_checkpoint(checkpoint_callback.best_model_path, weights_only=False)
@pytest.mark.parametrize("ignore", ["arg2", ("arg2", "arg3")])
def test_ignore_args_list_hparams(tmp_path, ignore):
"""Tests that args can be ignored in save_hyperparameters."""
class LocalModel(BoringModel):
def __init__(self, arg1, arg2, arg3):
super().__init__()
self.save_hyperparameters(ignore=ignore)
model = LocalModel(arg1=14, arg2=90, arg3=50)
# test proper property assignments
assert model.hparams.arg1 == 14
for arg in ignore:
assert arg not in model.hparams
# verify we can train
trainer = Trainer(default_root_dir=tmp_path, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path, weights_only=True)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["arg1"] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, arg2=123, arg3=100)
assert model.hparams.arg1 == 14
for arg in ignore:
assert arg not in model.hparams
| UnsafeParamModel |
python | pandas-dev__pandas | pandas/tests/reductions/test_reductions.py | {
"start": 8041,
"end": 18990
} | class ____:
# Note: the name TestIndexReductions indicates these tests
# were moved from an Index-specific test file, _not_ that these tests are
# intended long-term to be Index-specific
@pytest.mark.parametrize(
"start,stop,step",
[
(0, 400, 3),
(500, 0, -6),
(-(10**6), 10**6, 4),
(10**6, -(10**6), -4),
(0, 10, 20),
],
)
def test_max_min_range(self, start, stop, step):
# GH#17607
idx = RangeIndex(start, stop, step)
expected = idx._values.max()
result = idx.max()
assert result == expected
# skipna should be irrelevant since RangeIndex should never have NAs
result2 = idx.max(skipna=False)
assert result2 == expected
expected = idx._values.min()
result = idx.min()
assert result == expected
# skipna should be irrelevant since RangeIndex should never have NAs
result2 = idx.min(skipna=False)
assert result2 == expected
# empty
idx = RangeIndex(start, stop, -step)
assert isna(idx.max())
assert isna(idx.min())
def test_minmax_timedelta64(self):
# monotonic
idx1 = TimedeltaIndex(["1 days", "2 days", "3 days"])
assert idx1.is_monotonic_increasing
# non-monotonic
idx2 = TimedeltaIndex(["1 days", np.nan, "3 days", "NaT"])
assert not idx2.is_monotonic_increasing
for idx in [idx1, idx2]:
assert idx.min() == Timedelta("1 days")
assert idx.max() == Timedelta("3 days")
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize("op", ["min", "max"])
def test_minmax_timedelta_empty_or_na(self, op):
# Return NaT
obj = TimedeltaIndex([])
assert getattr(obj, op)() is NaT
obj = TimedeltaIndex([NaT])
assert getattr(obj, op)() is NaT
obj = TimedeltaIndex([NaT, NaT, NaT])
assert getattr(obj, op)() is NaT
def test_numpy_minmax_timedelta64(self):
td = timedelta_range("16815 days", "16820 days", freq="D")
assert np.min(td) == Timedelta("16815 days")
assert np.max(td) == Timedelta("16820 days")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(td, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(td, out=0)
assert np.argmin(td) == 0
assert np.argmax(td) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(td, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(td, out=0)
def test_timedelta_ops(self):
# GH#4984
# make sure ops return Timedelta
s = Series(
[Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
)
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
assert result == expected
result = td.to_frame().mean()
assert result[0] == expected
result = td.quantile(0.1)
expected = Timedelta(np.timedelta64(2600, "ms"))
assert result == expected
result = td.median()
expected = to_timedelta("00:00:09")
assert result == expected
result = td.to_frame().median()
assert result[0] == expected
# GH#6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta("00:01:21")
assert result == expected
result = td.to_frame().sum()
assert result[0] == expected
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
assert result == expected
result = td.to_frame().std()
assert result[0] == expected
# GH#10040
# make sure NaT is properly handled by median()
s = Series([Timestamp("2015-02-03"), Timestamp("2015-02-07")])
assert s.diff().median() == timedelta(days=4)
s = Series(
[Timestamp("2015-02-03"), Timestamp("2015-02-07"), Timestamp("2015-02-15")]
)
assert s.diff().median() == timedelta(days=6)
@pytest.mark.parametrize("opname", ["skew", "kurt", "sem", "prod", "var"])
def test_invalid_td64_reductions(self, opname):
s = Series(
[Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
)
td = s.diff()
msg = "|".join(
[
f"reduction operation '{opname}' not allowed for this dtype",
rf"cannot perform {opname} with type timedelta64\[ns\]",
f"does not support operation '{opname}'",
]
)
with pytest.raises(TypeError, match=msg):
getattr(td, opname)()
with pytest.raises(TypeError, match=msg):
getattr(td.to_frame(), opname)(numeric_only=False)
def test_minmax_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# monotonic
idx1 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz)
assert idx1.is_monotonic_increasing
# non-monotonic
idx2 = DatetimeIndex(
["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], tz=tz
)
assert not idx2.is_monotonic_increasing
for idx in [idx1, idx2]:
assert idx.min() == Timestamp("2011-01-01", tz=tz)
assert idx.max() == Timestamp("2011-01-03", tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize("op", ["min", "max"])
def test_minmax_nat_datetime64(self, op):
# Return NaT
obj = DatetimeIndex([])
assert isna(getattr(obj, op)())
obj = DatetimeIndex([NaT])
assert isna(getattr(obj, op)())
obj = DatetimeIndex([NaT, NaT, NaT])
assert isna(getattr(obj, op)())
def test_numpy_minmax_integer(self):
# GH#26125
idx = Index([1, 2, 3])
expected = idx.values.max()
result = np.max(idx)
assert result == expected
expected = idx.values.min()
result = np.min(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(idx, out=0)
expected = idx.values.argmax()
result = np.argmax(idx)
assert result == expected
expected = idx.values.argmin()
result = np.argmin(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(idx, out=0)
def test_numpy_minmax_range(self):
# GH#26125
idx = RangeIndex(0, 10, 3)
result = np.max(idx)
assert result == 9
result = np.min(idx)
assert result == 0
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(idx, out=0)
# No need to test again argmax/argmin compat since the implementation
# is the same as basic integer index
def test_numpy_minmax_datetime64(self):
dr = date_range(start="2016-01-15", end="2016-01-20")
assert np.min(dr) == Timestamp("2016-01-15 00:00:00")
assert np.max(dr) == Timestamp("2016-01-20 00:00:00")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(dr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(dr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(dr, out=0)
def test_minmax_period(self):
# monotonic
idx1 = PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
assert not idx1.is_monotonic_increasing
assert idx1[1:].is_monotonic_increasing
# non-monotonic
idx2 = PeriodIndex(
["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], freq="D"
)
assert not idx2.is_monotonic_increasing
for idx in [idx1, idx2]:
assert idx.min() == Period("2011-01-01", freq="D")
assert idx.max() == Period("2011-01-03", freq="D")
assert idx1.argmin() == 1
assert idx2.argmin() == 0
assert idx1.argmax() == 3
assert idx2.argmax() == 2
@pytest.mark.parametrize("op", ["min", "max"])
@pytest.mark.parametrize("data", [[], [NaT], [NaT, NaT, NaT]])
def test_minmax_period_empty_nat(self, op, data):
# Return NaT
obj = PeriodIndex(data, freq="M")
result = getattr(obj, op)()
assert result is NaT
def test_numpy_minmax_period(self):
pr = period_range(start="2016-01-15", end="2016-01-20")
assert np.min(pr) == Period("2016-01-15", freq="D")
assert np.max(pr) == Period("2016-01-20", freq="D")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(pr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(pr, out=0)
assert np.argmin(pr) == 0
assert np.argmax(pr) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(pr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(pr, out=0)
def test_min_max_categorical(self):
ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
msg = (
r"Categorical is not ordered for operation min\n"
r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n"
)
with pytest.raises(TypeError, match=msg):
ci.min()
msg = (
r"Categorical is not ordered for operation max\n"
r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n"
)
with pytest.raises(TypeError, match=msg):
ci.max()
ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=True)
assert ci.min() == "c"
assert ci.max() == "b"
| TestIndexReductions |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/collective_ops_test.py | {
"start": 5329,
"end": 18384
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context(num_devices=16)
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
tokens = {}
for dev in [dev0, dev1]:
with ops.device(dev):
tokens[dev] = create_ordering_token()
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication,
ordering_token=tokens[dev0])
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev1],
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
tokens = {}
for dev in [dev0, dev1]:
with ops.device(dev):
tokens[dev] = create_ordering_token()
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev1],
communication_hint=communication))
return collectives
cpu_tokens = {}
for i in range(16):
with ops.device('/device:CPU:%d' % i):
cpu_tokens[i] = create_ordering_token()
@def_function.function
def run_all_gather_16devices():
group_size = 16
group_key = 3
instance_key = 1
collectives = []
for i in range(16):
with ops.device('/device:CPU:%d' % i):
collectives.append(
collective_ops.all_gather(
constant_op.constant([i]),
group_size,
group_key,
instance_key,
ordering_token=cpu_tokens[i],
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_16devices():
self.assertAllClose(
result, list(range(16)), rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testAllToAll(self, collective_ops, device, communication):
if str(collective_ops) == 'v1':
self.skipTest('CollectiveAllToAllV1 is not implemented.')
devices = ['/device:%s:0' % device, '/device:%s:1' % device]
tokens = {}
for dev in devices:
with ops.device(dev):
tokens[dev] = create_ordering_token()
@def_function.function
def run_all_to_all_1device():
with ops.device(devices[0]):
in_value = constant_op.constant([1.0])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_to_all(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication,
ordering_token=tokens[devices[0]],
)
@def_function.function
def run_all_to_all_2devices():
group_size = 2
group_key = 2
instance_key = 2
collectives = []
for i in range(2):
with ops.device(devices[i]):
collectives.append(
collective_ops.all_to_all(
constant_op.constant([i, i]),
group_size,
group_key,
instance_key,
ordering_token=tokens[devices[i]],
communication_hint=communication,
)
)
return collectives
self.assertAllClose(run_all_to_all_1device(), [1.0])
for result in run_all_to_all_2devices():
self.assertAllClose(result, [0.0, 1.0])
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
tokens = {}
for dev in [dev0, dev1, dev2, dev3]:
with ops.device(dev):
tokens[dev] = create_ordering_token()
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.),
group_size,
group0_key,
instance_key,
ordering_token=tokens[dev0],
))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.),
group_size,
group0_key,
instance_key,
ordering_token=tokens[dev1],
))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.),
group_size,
group1_key,
instance_key,
ordering_token=tokens[dev2],
))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.),
group_size,
group1_key,
instance_key,
ordering_token=tokens[dev3],
))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
tokens = {}
for dev in [dev0]:
with ops.device(dev):
tokens[dev] = create_ordering_token()
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key=100,
ordering_token=tokens[dev0],
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key=200,
ordering_token=tokens[dev0],
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testCollectiveInvalidKey(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
tokens = {}
for dev in [dev0]:
with ops.device(dev):
tokens[dev] = create_ordering_token()
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with self.assertRaisesRegex(
errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
' got type 2 and data_type 1'):
with ops.device(dev0):
collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication)
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
tokens = {}
for device_idx in range(num_elements):
dev = '/{}:{}'.format(device, device_idx)
with ops.device(dev):
tokens[dev] = create_ordering_token()
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
dev = '/{}:{}'.format(device, device_idx)
with ops.device(dev):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev],
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[combinations.NamedObject('v2', CollectiveOpsV2)],
mode='eager',
max_subdivs_per_device=[-1, 0, 16]), device_combination))
| CollectiveOpsTest |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_import_export.py | {
"start": 19853,
"end": 20906
} | class ____(TestCase):
"""
Issue 2020 - export should handle QuerySet.values()
"""
class _EBookResource(ModelResource):
def get_queryset(self):
return EBook.objects.all().values("id", "name", "published")
class Meta:
model = EBook
fields = ("id", "name", "published")
def setUp(self):
super().setUp()
self.resource = QuerysetValuesOnExportTest._EBookResource()
EBook.objects.create(id=101, name="Moonraker", published=date(1955, 4, 5))
def test_export(self):
res = self.resource.export()
self.assertEqual(1, len(res.dict))
self.assertDictEqual(
{"id": "101", "name": "Moonraker", "published": "1955-04-05"},
res.dict.pop(),
)
def test_get_value_returns_none_when_attribute_missing(self):
instance = {"some_other_key": "value"}
field = Field(attribute="missing_attribute")
result = field.get_value(instance)
self.assertIsNone(result)
| QuerysetValuesOnExportTest |
python | sphinx-doc__sphinx | sphinx/ext/autosummary/generate.py | {
"start": 2044,
"end": 2131
} | class ____:
def emit_firstresult(self, *args: Any) -> None:
pass
| _DummyEvents |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_write_num_fmts.py | {
"start": 332,
"end": 1087
} | class ____(unittest.TestCase):
"""
Test the Styles _write_num_fmts() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_num_fmts(self):
"""Test the _write_num_fmts() method"""
xf_format = Format()
xf_format.num_format_index = 164
xf_format.set_num_format("#,##0.0")
self.styles._set_style_properties(
[[xf_format], None, 0, ["#,##0.0"], 0, 0, [], [], 0]
)
self.styles._write_num_fmts()
exp = """<numFmts count="1"><numFmt numFmtId="164" formatCode="#,##0.0"/></numFmts>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteNumFmts |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 7310,
"end": 8951
} | class ____(WebAuthnCredentialMixin, wtforms.Form):
__params__ = ["label", "credential"]
label = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(message="Specify a label"),
wtforms.validators.Length(
max=64, message=("Label must be 64 characters or less")
),
]
)
def __init__(
self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs
):
super().__init__(*args, **kwargs)
self.user_service = user_service
self.user_id = user_id
self.challenge = challenge
self.rp_id = rp_id
self.origin = origin
def validate_credential(self, field):
try:
json.loads(field.data.encode("utf-8"))
except json.JSONDecodeError:
raise wtforms.validators.ValidationError(
"Invalid WebAuthn credential: Bad payload"
)
try:
validated_credential = self.user_service.verify_webauthn_credential(
field.data.encode("utf-8"),
challenge=self.challenge,
rp_id=self.rp_id,
origin=self.origin,
)
except webauthn.RegistrationRejectedError as e:
raise wtforms.validators.ValidationError(str(e))
self.validated_credential = validated_credential
def validate_label(self, field):
label = field.data
if self.user_service.get_webauthn_by_label(self.user_id, label) is not None:
raise wtforms.validators.ValidationError(f"Label '{label}' already in use")
| ProvisionWebAuthnForm |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 7721,
"end": 7791
} | class ____(NumType):
"""
Describes integer data.
"""
| IntType |
python | langchain-ai__langchain | libs/core/langchain_core/utils/aiter.py | {
"start": 5032,
"end": 8996
} | class ____(Generic[T]):
"""Create `n` separate asynchronous iterators over `iterable`.
This splits a single `iterable` into multiple iterators, each providing
the same items in the same order.
All child iterators may advance separately but share the same items
from `iterable` -- when the most advanced iterator retrieves an item,
it is buffered until the least advanced iterator has yielded it as well.
A `tee` works lazily and can handle an infinite `iterable`, provided
that all iterators advance.
```python
async def derivative(sensor_data):
previous, current = a.tee(sensor_data, n=2)
await a.anext(previous) # advance one iterator
return a.map(operator.sub, previous, current)
```
Unlike `itertools.tee`, `.tee` returns a custom type instead
of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked
to get the child iterators. In addition, its `.tee.aclose` method
immediately closes all children, and it can be used in an `async with` context
for the same effect.
If `iterable` is an iterator and read elsewhere, `tee` will *not*
provide these items. Also, `tee` must internally buffer each item until the
last iterator has yielded it; if the most and least advanced iterator differ
by most data, using a :py`list` is more efficient (but not lazy).
If the underlying iterable is concurrency safe (`anext` may be awaited
concurrently) the resulting iterators are concurrency safe as well. Otherwise,
the iterators are safe if there is only ever one single "most advanced" iterator.
To enforce sequential use of `anext`, provide a `lock`
- e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application -
and access is automatically synchronised.
"""
def __init__(
self,
iterable: AsyncIterator[T],
n: int = 2,
*,
lock: AbstractAsyncContextManager[Any] | None = None,
):
"""Create a `tee`.
Args:
iterable: The iterable to split.
n: The number of iterators to create.
lock: The lock to synchronise access to the shared buffers.
"""
self._iterator = iterable.__aiter__() # before 3.10 aiter() doesn't exist
self._buffers: list[deque[T]] = [deque() for _ in range(n)]
self._children = tuple(
tee_peer(
iterator=self._iterator,
buffer=buffer,
peers=self._buffers,
lock=lock if lock is not None else NoLock(),
)
for buffer in self._buffers
)
def __len__(self) -> int:
"""Return the number of child iterators."""
return len(self._children)
@overload
def __getitem__(self, item: int) -> AsyncIterator[T]: ...
@overload
def __getitem__(self, item: slice) -> tuple[AsyncIterator[T], ...]: ...
def __getitem__(
self, item: int | slice
) -> AsyncIterator[T] | tuple[AsyncIterator[T], ...]:
"""Return the child iterator(s) for the given index or slice."""
return self._children[item]
def __iter__(self) -> Iterator[AsyncIterator[T]]:
"""Iterate over the child iterators.
Yields:
The child iterators.
"""
yield from self._children
async def __aenter__(self) -> "Tee[T]":
"""Return the tee instance."""
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
"""Close all child iterators.
Returns:
False, exceptions not suppressed.
"""
await self.aclose()
return False
async def aclose(self) -> None:
"""Async close all child iterators."""
for child in self._children:
await child.aclose()
atee = Tee
| Tee |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/metadatasource.py | {
"start": 462,
"end": 2174
} | class ____(ModelMetaclass):
__cls_set: Set[Type] = set()
def __new__( # noqa: PYI034 # Self cannot be used with Metaclass
meta_cls: Type[MetaDatasource], cls_name: str, bases: tuple[type], cls_dict
) -> MetaDatasource:
"""
MetaDatasource hook that runs when a new `Datasource` is defined.
This methods binds a factory method for the defined `Datasource` to `DataSourceManager` class which becomes
available as part of the `DataContext`.
Also binds asset adding methods according to the declared `asset_types`.
""" # noqa: E501 # FIXME CoP
logger.debug(f"1a. {meta_cls.__name__}.__new__() for `{cls_name}`")
cls = super().__new__(meta_cls, cls_name, bases, cls_dict)
if cls_name in ("Datasource", "InvalidDatasource") or cls_name.startswith("_"):
# NOTE: the above check is brittle and must be kept in-line with the Datasource.__name__
logger.debug(f"1c. Skip factory registration of base `{cls_name}`")
return cls
logger.debug(f" {cls_name} __dict__ ->\n{pf(cls.__dict__, depth=3)}")
meta_cls.__cls_set.add(cls)
logger.debug(f"Datasources: {len(meta_cls.__cls_set)}")
if cls.__module__ == "__main__":
logger.warning(
f"Datasource `{cls_name}` should not be defined as part of __main__ this may cause typing lookup collisions" # noqa: E501 # FIXME CoP
)
# instantiate new TypeLookup to prevent child classes conflicts with parent class asset types # noqa: E501 # FIXME CoP
cls._type_lookup = TypeLookup()
DataSourceManager.register_datasource(cls)
return cls
| MetaDatasource |
python | kamyu104__LeetCode-Solutions | Python/reorder-routes-to-make-all-paths-lead-to-the-city-zero.py | {
"start": 50,
"end": 762
} | class ____(object):
def minReorder(self, n, connections):
"""
:type n: int
:type connections: List[List[int]]
:rtype: int
"""
lookup, graph = set(), collections.defaultdict(list)
for u, v in connections:
lookup.add(u*n+v)
graph[v].append(u)
graph[u].append(v)
result = 0
stk = [(-1, 0)]
while stk:
parent, u = stk.pop()
result += (parent*n+u in lookup)
for v in reversed(graph[u]):
if v == parent:
continue
stk.append((u, v))
return result
# Time: O(n)
# Space: O(n)
import collections
| Solution |
python | pyparsing__pyparsing | examples/bf.py | {
"start": 2641,
"end": 2742
} | class ____(Instruction):
def execute(self, bf_engine: BFEngine):
bf_engine.ptr += 1
| IncrPtr |
python | doocs__leetcode | solution/2000-2099/2009.Minimum Number of Operations to Make Array Continuous/Solution.py | {
"start": 0,
"end": 272
} | class ____:
def minOperations(self, nums: List[int]) -> int:
ans = n = len(nums)
nums = sorted(set(nums))
for i, v in enumerate(nums):
j = bisect_right(nums, v + n - 1)
ans = min(ans, n - (j - i))
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-moves-to-make-palindrome.py | {
"start": 33,
"end": 445
} | class ____(object): # 0-indexed
def __init__(self, n):
self.__bit = [0]*(n+1)
def add(self, i, val):
i += 1
while i < len(self.__bit):
self.__bit[i] += val
i += (i & -i)
def query(self, i):
i += 1
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
# greedy, bit, fenwick tree
| BIT |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0129_addons_notification_data_migration.py | {
"start": 914,
"end": 1163
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0128_addons_notifications"),
]
operations = [
migrations.RunPython(forward_add_fields, reverse_remove_fields),
]
| Migration |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 45734,
"end": 49810
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: SpeechT5Config, layer_idx=None):
super().__init__()
self.self_attn = SpeechT5Attention(
embed_dim=config.hidden_size,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
layer_idx=layer_idx,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.encoder_attn = SpeechT5Attention(
config.hidden_size,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = SpeechT5FeedForward(config, config.decoder_ffn_dim)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, hidden_size)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| SpeechT5DecoderLayer |
python | joke2k__faker | tests/providers/test_geo.py | {
"start": 4967,
"end": 5262
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("pt_PT")
Faker.seed(0)
def test_nationality(self):
nationality = self.fake.nationality()
assert isinstance(nationality, str)
assert nationality in PtPtProvider.nationalities
| TestPtPT |
python | facebook__pyre-check | scripts/callgraph_utilities.py | {
"start": 7159,
"end": 9033
} | class ____:
dependency_graph: Dict[str, Set[str]]
entrypoints: Entrypoints
def __init__(self, input_call_graph: InputFormat, entrypoints: Entrypoints) -> None:
self.entrypoints = entrypoints
self.dependency_graph = defaultdict(lambda: set())
call_graph = input_call_graph.call_graph
for caller, callees in call_graph.items():
for callee in callees:
if caller == callee:
# skip self-references
continue
self.dependency_graph[callee].add(caller)
def find_traces_for_callees(
self, callees: Collection[str]
) -> Dict[str, Optional[Trace]]:
result = {}
for callee in callees:
if callee in self.dependency_graph and callee not in result:
result[callee] = self.find_shortest_trace_to_entrypoint(callee)
elif callee not in result:
result[callee] = None
return result
def find_shortest_trace_to_entrypoint(self, start_call: str) -> Optional[Trace]:
if start_call in self.entrypoints.entrypoints:
return [start_call]
queue: Deque[Trace] = deque([[start_call]])
visited = set()
while queue:
current_node_path = queue.popleft()
current_node = current_node_path[-1]
for caller in self.dependency_graph[current_node]:
if caller in visited:
continue
visited.add(caller)
next_node_path = current_node_path + [caller]
if caller in self.entrypoints.entrypoints:
return next_node_path
queue.append(next_node_path)
return []
@staticmethod
def node_path_to_str(node_path: Trace) -> str:
return " -> ".join(node_path)
| DependencyGraph |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_stateful.py | {
"start": 5686,
"end": 10458
} | class ____(RuleBasedStateMachine):
b = Bundle("b")
def __init__(self):
self.expected_bundle_length = 0
super().__init__()
@invariant()
def bundle_length(self):
assert len(self.bundle("b")) == self.expected_bundle_length
@rule(target=b, items=lists(elements=integers(), max_size=10))
def populate_bundle(self, items):
self.expected_bundle_length += len(items)
return multiple(*items)
@rule(target=b)
def do_not_populate(self):
return multiple()
TestMachineUsingMultiple = MachineUsingMultiple.TestCase
def test_multiple_variables_printed():
class ProducesMultiple(RuleBasedStateMachine):
b = Bundle("b")
@initialize(target=b)
def populate_bundle(self):
return multiple(1, 2)
@rule()
def fail_fast(self):
raise AssertionError
with raises(AssertionError) as err:
run_state_machine_as_test(ProducesMultiple)
# This is tightly coupled to the output format of the step printing.
# The first line is "Falsifying Example:..." the second is creating
# the state machine, the third is calling the "initialize" method.
assignment_line = err.value.__notes__[2]
# 'populate_bundle()' returns 2 values, so should be
# expanded to 2 variables.
assert assignment_line == "b_0, b_1 = state.populate_bundle()"
# Make sure MultipleResult is iterable so the printed code is valid.
# See https://github.com/HypothesisWorks/hypothesis/issues/2311
state = ProducesMultiple()
_b_0, _b_1 = state.populate_bundle()
with raises(AssertionError):
state.fail_fast()
def test_multiple_variables_printed_single_element():
# https://github.com/HypothesisWorks/hypothesis/issues/3236
class ProducesMultiple(RuleBasedStateMachine):
b = Bundle("b")
@initialize(target=b)
def populate_bundle(self):
return multiple(1)
@rule(b=b)
def fail_fast(self, b):
assert b != 1
with raises(AssertionError) as err:
run_state_machine_as_test(ProducesMultiple)
assignment_line = err.value.__notes__[2]
assert assignment_line == "(b_0,) = state.populate_bundle()"
state = ProducesMultiple()
(v1,) = state.populate_bundle()
state.fail_fast((v1,)) # passes if tuple not unpacked
with raises(AssertionError):
state.fail_fast(v1)
def test_no_variables_printed():
class ProducesNoVariables(RuleBasedStateMachine):
b = Bundle("b")
@initialize(target=b)
def populate_bundle(self):
return multiple()
@rule()
def fail_fast(self):
raise AssertionError
with raises(AssertionError) as err:
run_state_machine_as_test(ProducesNoVariables)
# This is tightly coupled to the output format of the step printing.
# The first line is "Falsifying Example:..." the second is creating
# the state machine, the third is calling the "initialize" method.
assignment_line = err.value.__notes__[2]
# 'populate_bundle()' returns 0 values, so there should be no
# variable assignment.
assert assignment_line == "state.populate_bundle()"
def test_consumes_typecheck():
with pytest.raises(TypeError):
consumes(integers())
def test_ratchetting_raises_flaky():
with raises(Flaky):
FlakyRatchettingMachine.TestCase().runTest()
def test_empty_machine_is_invalid():
class EmptyMachine(RuleBasedStateMachine):
pass
with raises(InvalidDefinition):
EmptyMachine.TestCase().runTest()
def test_machine_with_no_terminals_is_invalid():
class NonTerminalMachine(RuleBasedStateMachine):
@rule(value=Bundle("hi"))
def bye(self, hi):
pass
with raises(InvalidDefinition):
NonTerminalMachine.TestCase().runTest()
def test_minimizes_errors_in_teardown():
# temporary debugging to try to narrow down a potential thread-safety issue
import threading
from hypothesis import Verbosity
counter = 0
@Settings(database=None, verbosity=Verbosity.debug)
class Foo(RuleBasedStateMachine):
@initialize()
def init(self):
nonlocal counter
counter = 0
print(f"[{threading.get_ident()}] init", counter)
@rule()
def increment(self):
nonlocal counter
counter += 1
print(f"[{threading.get_ident()}] increment", counter)
def teardown(self):
nonlocal counter
print(f"[{threading.get_ident()}] teardown", counter)
assert not counter
with raises(AssertionError):
run_state_machine_as_test(Foo)
assert counter == 1
| MachineUsingMultiple |
python | Lightning-AI__lightning | examples/pytorch/basics/autoencoder.py | {
"start": 1393,
"end": 4176
} | class ____(callbacks.Callback):
def __init__(
self,
num_samples: int = 3,
nrow: int = 8,
padding: int = 2,
normalize: bool = True,
value_range: Optional[tuple[int, int]] = None,
scale_each: bool = False,
pad_value: int = 0,
) -> None:
"""
Args:
num_samples: Number of images displayed in the grid. Default: ``3``.
nrow: Number of images displayed in each row of the grid.
The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
padding: Amount of padding. Default: ``2``.
normalize: If ``True``, shift the image to the range (0, 1),
by the min and max values specified by :attr:`range`. Default: ``False``.
value_range: Tuple (min, max) where min and max are numbers,
then these numbers are used to normalize the image. By default, min and max
are computed from the tensor.
scale_each: If ``True``, scale each image in the batch of
images separately rather than the (min, max) over all images. Default: ``False``.
pad_value: Value for the padded pixels. Default: ``0``.
"""
if not _TORCHVISION_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError("You want to use `torchvision` which is not installed yet.")
super().__init__()
self.num_samples = num_samples
self.nrow = nrow
self.padding = padding
self.normalize = normalize
self.value_range = value_range
self.scale_each = scale_each
self.pad_value = pad_value
def _to_grid(self, images):
return torchvision.utils.make_grid(
tensor=images,
nrow=self.nrow,
padding=self.padding,
normalize=self.normalize,
value_range=self.value_range,
scale_each=self.scale_each,
pad_value=self.pad_value,
)
@rank_zero_only
def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
if not _TORCHVISION_AVAILABLE:
return
images, _ = next(iter(DataLoader(trainer.datamodule.mnist_val, batch_size=self.num_samples)))
images_flattened = images.view(images.size(0), -1)
# generate images
with torch.no_grad():
pl_module.eval()
images_generated = pl_module(images_flattened.to(pl_module.device))
pl_module.train()
if trainer.current_epoch == 0:
save_image(self._to_grid(images), f"grid_ori_{trainer.current_epoch}.png")
save_image(self._to_grid(images_generated.reshape(images.shape)), f"grid_generated_{trainer.current_epoch}.png")
| ImageSampler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/lambda9.py | {
"start": 238,
"end": 514
} | class ____(Generic[_OutT]):
@overload
def map(self, func: Callable[[_OutT], Exception], /) -> "Flow[None]": ...
@overload
def map(self, func: Callable[[_OutT], _Out2T], /) -> "Flow[_Out2T]": ...
def map(self, obj, /):
return cast("Flow", self)
| Flow |
python | walkccc__LeetCode | solutions/2572. Count the Number of Square-Free Subsets/2572.py | {
"start": 0,
"end": 1117
} | class ____:
def squareFreeSubsets(self, nums: list[int]) -> int:
MOD = 1_000_000_007
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
def getMask(num: int) -> int:
"""
e.g. num = 10 = 2 * 5, so mask = 0b101 . 0b1010 (append a 0)
num = 15 = 3 * 5, so mask = 0b110 . 0b1100 (append a 0)
num = 25 = 5 * 5, so mask = (-1)2 . (1..1)2 (invalid)
"""
mask = 0
for i, prime in enumerate(primes):
rootCount = 0
while num % prime == 0:
num //= prime
rootCount += 1
if rootCount >= 2:
return -1
if rootCount == 1:
mask |= 1 << i
return mask << 1
masks = [getMask(num) for num in nums]
@functools.lru_cache(None)
def dp(i: int, used: int) -> int:
if i == len(masks):
return 1
pick = dp(i + 1, used | masks[i]) if (masks[i] & used) == 0 else 0
skip = dp(i + 1, used)
return (pick + skip) % MOD
# -1 means that we take no number.
# `used` is initialized to 1 so that -1 & 1 = 1 instead of 0.
return (dp(0, 1) - 1 + MOD) % MOD
| Solution |
python | Netflix__metaflow | metaflow/plugins/cards/exception.py | {
"start": 2962,
"end": 3338
} | class ____(MetaflowException):
headline = "Unable to render @card"
def __init__(self, card_type, args):
msg = (
"Card of type %s is unable to be rendered with arguments %s.\nStack trace : "
" %s" % (card_type, args, traceback.format_exc())
)
super(UnrenderableCardException, self).__init__(msg)
| UnrenderableCardException |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 99,
"end": 158
} | class ____(JoseError):
error = "decode_error"
| DecodeError |
python | tensorflow__tensorflow | tensorflow/python/ops/variables.py | {
"start": 7406,
"end": 54868
} | class ____(trackable.Trackable, metaclass=VariableMetaclass):
"""See the [variable guide](https://tensorflow.org/guide/variable).
A variable maintains shared, persistent state manipulated by a program.
The `Variable()` constructor requires an initial value for the variable, which
can be a `Tensor` of any type and shape. This initial value defines the type
and shape of the variable. After construction, the type and shape of the
variable are fixed. The value can be changed using one of the assign methods.
>>> v = tf.Variable(1.)
>>> v.assign(2.)
<tf.Variable ... shape=() dtype=float32, numpy=2.0>
>>> v.assign_add(0.5)
<tf.Variable ... shape=() dtype=float32, numpy=2.5>
The `shape` argument to `Variable`'s constructor allows you to construct a
variable with a less defined shape than its `initial_value`:
>>> v = tf.Variable(1., shape=tf.TensorShape(None))
>>> v.assign([[1.]])
<tf.Variable ... shape=<unknown> dtype=float32, numpy=array([[1.]], ...)>
Just like any `Tensor`, variables created with `Variable()` can be used as
inputs to operations. Additionally, all the operators overloaded for the
`Tensor` class are carried over to variables.
>>> w = tf.Variable([[1.], [2.]])
>>> x = tf.constant([[3., 4.]])
>>> tf.matmul(w, x)
<tf.Tensor:... shape=(2, 2), ... numpy=
array([[3., 4.],
[6., 8.]], dtype=float32)>
>>> tf.sigmoid(w + x)
<tf.Tensor:... shape=(2, 2), ...>
When building a machine learning model it is often convenient to distinguish
between variables holding trainable model parameters and other variables such
as a `step` variable used to count training steps. To make this easier, the
variable constructor supports a `trainable=<bool>`
parameter. `tf.GradientTape` watches trainable variables by default:
>>> with tf.GradientTape(persistent=True) as tape:
... trainable = tf.Variable(1.)
... non_trainable = tf.Variable(2., trainable=False)
... x1 = trainable * 2.
... x2 = non_trainable * 3.
>>> tape.gradient(x1, trainable)
<tf.Tensor:... shape=(), dtype=float32, numpy=2.0>
>>> assert tape.gradient(x2, non_trainable) is None # Unwatched
Variables are automatically tracked when assigned to attributes of types
inheriting from `tf.Module`.
>>> m = tf.Module()
>>> m.v = tf.Variable([1.])
>>> m.trainable_variables
(<tf.Variable ... shape=(1,) ... numpy=array([1.], dtype=float32)>,)
This tracking then allows saving variable values to
[training checkpoints](https://www.tensorflow.org/guide/checkpoint), or to
[SavedModels](https://www.tensorflow.org/guide/saved_model) which include
serialized TensorFlow graphs.
Variables are often captured and manipulated by `tf.function`s. This works the
same way the un-decorated function would have:
>>> v = tf.Variable(0.)
>>> read_and_decrement = tf.function(lambda: v.assign_sub(0.1))
>>> read_and_decrement()
<tf.Tensor: shape=(), dtype=float32, numpy=-0.1>
>>> read_and_decrement()
<tf.Tensor: shape=(), dtype=float32, numpy=-0.2>
Variables created inside a `tf.function` must be owned outside the function
and be created only once:
>>> class M(tf.Module):
... @tf.function
... def __call__(self, x):
... if not hasattr(self, "v"): # Or set self.v to None in __init__
... self.v = tf.Variable(x)
... return self.v * x
>>> m = M()
>>> m(2.)
<tf.Tensor: shape=(), dtype=float32, numpy=4.0>
>>> m(3.)
<tf.Tensor: shape=(), dtype=float32, numpy=6.0>
>>> m.v
<tf.Variable ... shape=() dtype=float32, numpy=2.0>
See the `tf.function` documentation for details.
"""
@deprecated_args(
None, "A variable's value can be manually cached by calling "
"tf.Variable.read_value() under a tf.device scope. The caching_device "
"argument does not work properly.", "caching_device")
def __init__(self,
initial_value=None,
trainable=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
import_scope=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE,
shape=None,
experimental_enable_variable_lifting=True,
):
"""Creates a new variable with value `initial_value`.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, GradientTapes automatically watch uses of this
variable. Defaults to `True`, unless `synchronization` is set to
`ON_READ`, in which case it defaults to `False`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Note: This argument is only valid when using a v1-style
`Session`. Optional device string describing where the Variable should
be cached for reading. Defaults to the Variable's device. If not `None`,
caches on another device. Typical use is to cache on the device where
the Ops using the Variable reside, to deduplicate copying through
`Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
variable_def: `VariableDef` protocol buffer. If not `None`, recreates the
Variable object with its contents, referencing the variable's nodes in
the graph, which must already exist. The graph is not changed.
`variable_def` and the other arguments are mutually exclusive.
dtype: If set, initial_value will be converted to the given type. If
`None`, either the datatype will be kept (if `initial_value` is a
Tensor), or `convert_to_tensor` will decide.
import_scope: Optional `string`. Name scope to add to the `Variable.` Only
used when initializing from protocol buffer.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
experimental_enable_variable_lifting: Whether to lift the variable out if
it's in a `tf.function`. Default is `True`. When this argument
is `True`, variable creation will follow the behavior and
restrictions described
[here](https://www.tensorflow.org/guide/function#creating_tfvariables).
If this argument is `False`, that description doesn't apply,
and you can freely create and use the variable in the
`tf.function`, as if it's a "mutable `tf.Tensor`". You can't
return the variable though.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
def value(self):
"""Returns the last snapshot of this variable.
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
Returns a `Tensor` which holds the value of the variable. You can not
assign a new value to this tensor as it is not a reference to the variable.
To avoid copies, if the consumer of the returned value is on the same device
as the variable, this actually returns the live value of the variable, not
a copy. Updates to the variable are seen by the consumer. If the consumer
is on a different device it will get a copy of the variable.
Returns:
A `Tensor` containing the value of the variable.
"""
raise NotImplementedError
def read_value(self):
"""Returns the value of this variable, read in the current context.
Can be different from value() if it's on another device, with control
dependencies, etc.
Returns:
A `Tensor` containing the value of the variable.
"""
raise NotImplementedError
def set_shape(self, shape):
"""Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
"""
raise NotImplementedError
@property
def trainable(self):
raise NotImplementedError
@property
def synchronization(self):
raise NotImplementedError
@property
def aggregation(self):
raise NotImplementedError
def eval(self, session=None):
"""In a session, computes and returns the value of this variable.
This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See `tf.compat.v1.Session` for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
print(v.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(v.eval())
```
Args:
session: The session to use to evaluate this variable. If none, the
default session is used.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
raise NotImplementedError
@deprecated(
None, "Use Variable.read_value. Variables in 2.X are initialized "
"automatically both in eager and graph (inside tf.defun) contexts.")
def initialized_value(self):
"""Returns the value of the initialized variable.
You should use this instead of the variable itself to initialize another
variable with a value that depends on the value of this variable.
```python
# Initialize 'v' with a random tensor.
v = tf.Variable(tf.random.truncated_normal([10, 40]))
# Use `initialized_value` to guarantee that `v` has been
# initialized before its value is used to initialize `w`.
# The random values are picked only once.
w = tf.Variable(v.initialized_value() * 2.0)
```
Returns:
A `Tensor` holding the value of this variable after its initializer
has run.
"""
raise NotImplementedError
@property
def initial_value(self):
"""Returns the Tensor used as the initial value for the variable.
Note that this is different from `initialized_value()` which runs
the op that initializes the variable before returning its value.
This method returns the tensor that is used by the op that initializes
the variable.
Returns:
A `Tensor`.
"""
raise NotImplementedError
@property
def constraint(self):
"""Returns the constraint function associated with this variable.
Returns:
The constraint function that was passed to the variable constructor.
Can be `None` if no constraint was passed.
"""
raise NotImplementedError
def assign(self, value, use_locking=False, name=None, read_value=True):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
The updated variable. If `read_value` is false, instead returns None in
Eager mode and the assign op in graph mode.
"""
raise NotImplementedError
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
The updated variable. If `read_value` is false, instead returns None in
Eager mode and the assign op in graph mode.
"""
raise NotImplementedError
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
The updated variable. If `read_value` is false, instead returns None in
Eager mode and the assign op in graph mode.
"""
raise NotImplementedError
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
"""Subtracts `tf.IndexedSlices` from this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_add(self, sparse_delta, use_locking=False, name=None):
"""Adds `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_max(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of max with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_min(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of min with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
"""Multiply this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to multiply this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_div(self, sparse_delta, use_locking=False, name=None):
"""Divide this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to divide this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable batch-wise.
Analogous to `batch_gather`. This assumes that this variable and the
sparse_delta IndexedSlices have a series of leading dimensions that are the
same for all of them, and the updates are performed on the last dimension of
indices. In other words, the dimensions should be the following:
`num_prefix_dims = sparse_delta.indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
batch_dim:]`
where
`sparse_delta.updates.shape[:num_prefix_dims]`
`== sparse_delta.indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n,
sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
i_1, ..., i_n, j]`
When sparse_delta.indices is a 1D tensor, this operation is equivalent to
`scatter_update`.
To avoid this operation one can looping over the first `ndims` of the
variable and using `scatter_update` on the subtensors that result of slicing
the first dimension. This is a valid option for `ndims = 1`, but less
efficient than this implementation.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_nd_sub(self, indices, updates, name=None):
"""Applies sparse subtraction to individual values or slices in a Variable.
Assuming the variable has rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into self.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of self.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
v.scatter_nd_sub(indices, updates)
print(v)
```
After the update `v` would look like this:
[1, -9, 3, -6, -4, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
raise NotImplementedError
def scatter_nd_add(self, indices, updates, name=None):
"""Applies sparse addition to individual values or slices in a Variable.
The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into self.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of self.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
v.scatter_nd_add(indices, updates)
print(v)
```
The resulting update to v would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
raise NotImplementedError
def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into self.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of self.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
v.scatter_nd_update(indices, updates)
print(v)
```
The resulting update to v would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
raise NotImplementedError
  def sparse_read(self, indices, name=None):
    r"""Gather slices from params axis axis according to indices.

    This function supports a subset of tf.gather, see tf.gather for details on
    usage.

    Args:
      indices: The index `Tensor`. Must be one of the following types: `int32`,
        `int64`. Must be in range `[0, params.shape[axis])`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `params`.
    """
    # NOTE(review): raises AttributeError rather than NotImplementedError —
    # presumably so hasattr()-style probes report the op as absent on base
    # Variables; confirm before changing.
    raise AttributeError

  def gather_nd(self, indices, name=None):
    r"""Gather slices from `params` into a Tensor with shape specified by `indices`.

    See tf.gather_nd for details.

    Args:
      indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
        Index tensor.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `params`.
    """
    # Same AttributeError convention as sparse_read above.
    raise AttributeError
  @deprecated(None, "Prefer Dataset.range instead.")
  def count_up_to(self, limit):
    """Increments this variable until it reaches `limit`.

    When that Op is run it tries to increment the variable by `1`. If
    incrementing the variable would bring it above `limit` then the Op raises
    the exception `OutOfRangeError`.

    If no error is raised, the Op outputs the value of the variable before
    the increment.

    This is essentially a shortcut for `count_up_to(self, limit)`.

    Args:
      limit: value at which incrementing the variable raises an error.

    Returns:
      A `Tensor` that will hold the variable value before the increment. If no
      other Op modifies this variable, the values produced will all be
      distinct.

    Raises:
      NotImplementedError: Always; concrete subclasses must override.
    """
    raise NotImplementedError
@deprecated(None,
"Prefer Variable.assign which has equivalent behavior in 2.X.")
def load(self, value, session=None):
"""Load new value into this variable.
Writes new value to variable's memory. Doesn't add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See `tf.compat.v1.Session` for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
v.load([2, 3], sess)
print(v.eval(sess)) # prints [2 3]
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
v.load([3, 4], sess)
print(v.eval()) # prints [3 4]
```
Args:
value: New variable value
session: The session to use to evaluate this variable. If none, the
default session is used.
Raises:
ValueError: Session is not passed and no default session
"""
if context.executing_eagerly():
self.assign(value)
else:
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self.initializer, {self.initializer.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
f"Incompatible type conversion requested to type '{dtype.name}' for "
f"variable of type '{v.dtype.name}' (Variable: {v}).")
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
  @classmethod
  def _OverloadAllOperators(cls):  # pylint: disable=invalid-name
    """Register overloads for all operators.

    Copies every overloadable Tensor operator onto this class so Variables
    participate in arithmetic/comparison expressions like Tensors do.
    """
    for operator in tensor_lib.Tensor.OVERLOADABLE_OPERATORS:
      cls._OverloadOperator(operator)
    # For slicing, bind getitem differently than a tensor (use _slice_helper_var
    # instead)
    # pylint: disable=protected-access
    setattr(cls, "__getitem__", tensor_getitem_override._slice_helper_var)
@classmethod
def _OverloadOperator(cls, operator): # pylint: disable=invalid-name
"""Defer an operator overload to `tensor_lib.Tensor`.
We pull the operator out of tensor_lib.Tensor dynamically to avoid ordering
issues.
Args:
operator: string. The operator name.
"""
# We can't use the overload mechanism on __eq__ & __ne__ since __eq__ is
# called when adding a variable to sets. As a result we call a.value() which
# causes infinite recursion when operating within a GradientTape
# TODO(gjn): Consider removing this
if operator == "__eq__" or operator == "__ne__":
return
tensor_oper = getattr(tensor_lib.Tensor, operator)
def _run_op(a, *args, **kwargs):
# pylint: disable=protected-access
return tensor_oper(a.value(), *args, **kwargs)
functools.update_wrapper(_run_op, tensor_oper)
setattr(cls, operator, _run_op)
def __hash__(self):
if (
tensor_lib.Tensor._USE_EQUALITY
and ops.executing_eagerly_outside_functions()
): # pylint: disable=protected-access
raise TypeError(
"Variable is unhashable. "
f"Instead, use variable.ref() as the key. (Variable: {self})"
)
else:
return id(self)
  # TODO(gjn): duplicate of math_ops.tensor_equals, consider removing
  def __eq__(self, other):
    """Compares two variables element-wise for equality."""
    if (
        tensor_lib.Tensor._USE_EQUALITY
        and ops.executing_eagerly_outside_functions()
    ):  # pylint: disable=protected-access
      return gen_math_ops.equal(self, other, incompatible_shape_error=False)
    else:
      # In legacy graph mode, tensor equality is object equality
      return self is other

  # TODO(gjn): duplicate of math_ops.tensor_not_equals, consider removing
  def __ne__(self, other):
    """Compares two variables element-wise for inequality."""
    if (
        tensor_lib.Tensor._USE_EQUALITY
        and ops.executing_eagerly_outside_functions()
    ):  # pylint: disable=protected-access
      return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
    else:
      # In legacy graph mode, tensor equality is object equality
      return self is not other
  def __iter__(self):
    """When executing eagerly, iterates over the value of the variable."""
    # Delegates to the read snapshot; graph-mode restrictions on iterating a
    # Tensor therefore apply here as well.
    return iter(self.read_value())

  # NOTE(mrry): This enables the Variable's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Variable class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  # mechanism, which allows more control over how Variables interact
  # with ndarrays.
  __array_priority__ = 100
  # The properties below are abstract: every concrete Variable implementation
  # (ref variables, resource variables, distributed wrappers) overrides them.
  @property
  def name(self):
    """The name of this variable."""
    raise NotImplementedError

  @property
  def _shared_name(self):
    """The shared name of the variable.

    Unlike name(), shared_name doesn't have ":0" suffix. It is user-specified
    name with name scope prefix.

    Returns:
      variable name.
    """
    # Strip the output-index suffix (":0") from the full tensor name.
    return self.name[:self.name.index(":")]

  @property
  def initializer(self):
    """The initializer operation for this variable."""
    raise NotImplementedError

  @property
  def device(self):
    """The device of this variable."""
    raise NotImplementedError

  @property
  def dtype(self):
    """The `DType` of this variable."""
    raise NotImplementedError

  @property
  def op(self):
    """The `Operation` of this variable."""
    raise NotImplementedError

  @property
  def graph(self):
    """The `Graph` of this variable."""
    raise NotImplementedError

  @property
  def shape(self):
    """The `TensorShape` of this variable.

    Returns:
      A `TensorShape`.
    """
    raise NotImplementedError
  def get_shape(self) -> tensor_shape.TensorShape:
    """Alias of `Variable.shape`."""
    return self.shape

  def _gather_saveables_for_checkpoint(self):
    """For implementing `Trackable`. This object is saveable on its own."""
    # Maps the canonical value key to this variable itself.
    return {trackable.VARIABLE_VALUE_KEY: self}
  def to_proto(self, export_scope=None):
    """Converts a `Variable` to a `VariableDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `VariableDef` protocol buffer, or `None` if the `Variable` is not
      in the specified name scope.

    Raises:
      NotImplementedError: Always; concrete subclasses must override.
    """
    raise NotImplementedError

  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Returns a `Variable` object created from `variable_def`."""
    # Abstract counterpart to `to_proto`; overridden by concrete variables.
    raise NotImplementedError
  def _set_save_slice_info(self, save_slice_info):
    """Sets the slice info for this `Variable`.

    Args:
      save_slice_info: A `Variable.SaveSliceInfo` object.
    """
    self._save_slice_info = save_slice_info

  def _get_save_slice_info(self):
    # Returns the `Variable.SaveSliceInfo` set above, or whatever default the
    # concrete subclass initialized.
    return self._save_slice_info
  @deprecated(None, "Use ref() instead.")
  def experimental_ref(self):
    # Deprecated alias kept for backward compatibility.
    return self.ref()

  def ref(self):
    # tf.Tensor also has the same ref() API. If you update the
    # documentation here, please update tf.Tensor.ref() as well.
    """Returns a hashable reference object to this Variable.

    The primary use case for this API is to put variables in a set/dictionary.
    We can't put variables in a set/dictionary as `variable.__hash__()` is no
    longer available starting Tensorflow 2.0.

    The following will raise an exception starting 2.0

    >>> x = tf.Variable(5)
    >>> y = tf.Variable(10)
    >>> z = tf.Variable(10)
    >>> variable_set = {x, y, z}
    Traceback (most recent call last):
      ...
    TypeError: Variable is unhashable. Instead, use tensor.ref() as the key.
    >>> variable_dict = {x: 'five', y: 'ten'}
    Traceback (most recent call last):
      ...
    TypeError: Variable is unhashable. Instead, use tensor.ref() as the key.

    Instead, we can use `variable.ref()`.

    >>> variable_set = {x.ref(), y.ref(), z.ref()}
    >>> x.ref() in variable_set
    True
    >>> variable_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}
    >>> variable_dict[y.ref()]
    'ten'

    Also, the reference object provides `.deref()` function that returns the
    original Variable.

    >>> x = tf.Variable(5)
    >>> x.ref().deref()
    <tf.Variable 'Variable:0' shape=() dtype=int32, numpy=5>
    """
    # Identity-based wrapper: equal only to references of this same object.
    return object_identity.Reference(self)
@classmethod
def _variable_call(
cls,
initial_value=None,
trainable=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
import_scope=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE,
shape=None,
experimental_enable_variable_lifting=None,
**kwargs,
):
"""Variable class getter. Useful to force the signature."""
if cls is not Variable:
return None
previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)
for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
previous_getter = _make_getter(getter, previous_getter)
# Reset `aggregation` that is explicitly set as `None` to the enum NONE.
if aggregation is None:
aggregation = VariableAggregation.NONE
return previous_getter(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
variable_def=variable_def,
dtype=dtype,
import_scope=import_scope,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation,
shape=shape,
experimental_enable_variable_lifting=experimental_enable_variable_lifting,
**kwargs
)
  class SaveSliceInfo:
    """Information on how to save this Variable as a slice.

    Provides internal support for saving variables as slices of a larger
    variable.  This API is not public and is subject to change.

    Available properties:

    * full_name
    * full_shape
    * var_offset
    * var_shape
    """

    def __init__(self,
                 full_name=None,
                 full_shape=None,
                 var_offset=None,
                 var_shape=None,
                 save_slice_info_def=None,
                 import_scope=None):
      """Create a `SaveSliceInfo`.

      Args:
        full_name: Name of the full variable of which this `Variable` is a
          slice.
        full_shape: Shape of the full variable, as a list of int.
        var_offset: Offset of this `Variable` into the full variable, as a list
          of int.
        var_shape: Shape of this `Variable`, as a list of int.
        save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
          recreates the SaveSliceInfo object its contents. `save_slice_info_def`
          and other arguments are mutually exclusive.
        import_scope: Optional `string`. Name scope to add. Only used when
          initializing from protocol buffer.
      """
      if save_slice_info_def:
        # Proto path: all fields come from the proto; the other kwargs are
        # ignored by design (mutually exclusive per the docstring).
        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
        self.full_name = ops.prepend_name_scope(
            save_slice_info_def.full_name, import_scope=import_scope)
        self.full_shape = list(save_slice_info_def.full_shape)
        self.var_offset = list(save_slice_info_def.var_offset)
        self.var_shape = list(save_slice_info_def.var_shape)
      else:
        self.full_name = full_name
        self.full_shape = full_shape
        self.var_offset = var_offset
        self.var_shape = var_shape

    @property
    def spec(self):
      """Computes the spec string used for saving."""
      # Format: "<full_shape dims space-separated> <offset,shape>:<...>",
      # e.g. "4 3 5 0,4:1,1:3,2"; `from_spec` below is the inverse.
      full_shape_str = " ".join("%d" % d for d in self.full_shape) + " "
      sl_spec = ":".join(
          "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape))
      return full_shape_str + sl_spec

    @classmethod
    def from_spec(cls, spec: str) -> Self:
      """Parses a SaveSliceInfo spec string and returns a SaveSliceInfo object.

      Args:
        spec: The tensor slice spec string according to the SaveSliceInfo.spec
          property. The spec contains the space-separated shape of the full
          variable, followed by colon-separated pairs of the variable's offset
          and shape, where each pair is comma-separated. For example, consider a
          variable whose full shape is [4 3 5], offset is [0 1 3], and shape is
          [4 1 2]. This variable's SaveSliceInfo.spec would be
          "4 3 5 0,4:1,1:3,2".

      Returns:
        A SaveSliceInfo object containing the extracted information.

      Raises:
        ValueError: If the input string is not in the expected format.
      """
      # An empty spec yields an empty (all-None) SaveSliceInfo.
      if not spec:
        return cls()

      try:
        # The last space-separated token is the slice part; everything before
        # it is the full shape.
        full_shape_str, slice_str = spec.rsplit(" ", 1)
      except ValueError as e:
        raise ValueError(
            "Spec string must contain space-separated full_shape info.") from e

      # Parse the full shape.
      full_shape = []
      for dim in full_shape_str.split():
        try:
          full_shape.append(int(dim))
        except ValueError as e:
          raise ValueError(
              "Spec string full_shape must be a sequence of integers. "
              f"Found '{dim}', which is not an integer.") from e

      # Parse the slice specification.
      var_offset = []
      var_shape = []
      for dim_spec in slice_str.split(":"):
        try:
          offset, shape = dim_spec.split(",")
        except ValueError as e:
          raise ValueError(
              "Spec string must contain comma-separated pairs of offsets and "
              "shapes.") from e
        try:
          var_offset.append(int(offset))
        except ValueError as e:
          raise ValueError(
              "Spec string var_offset must be an integer. "
              f"Found '{offset}', which is not an integer.") from e
        try:
          var_shape.append(int(shape))
        except ValueError as e:
          raise ValueError(
              "Spec string var_shape must be an integer. "
              f"Found '{shape}', which is not an integer.") from e

      return cls(
          full_shape=full_shape,
          var_offset=var_offset,
          var_shape=var_shape
      )

    def to_proto(self, export_scope=None):
      """Returns a SaveSliceInfoDef() proto.

      Args:
        export_scope: Optional `string`. Name scope to remove.

      Returns:
        A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
        in the specified name scope.
      """
      if (export_scope is None or self.full_name.startswith(export_scope)):
        save_slice_info_def = variable_pb2.SaveSliceInfoDef()
        save_slice_info_def.full_name = ops.strip_name_scope(
            self.full_name, export_scope)
        # Repeated proto fields have no extend-from-list shortcut here;
        # append element by element.
        for i in self.full_shape:
          save_slice_info_def.full_shape.append(i)
        for i in self.var_offset:
          save_slice_info_def.var_offset.append(i)
        for i in self.var_shape:
          save_slice_info_def.var_shape.append(i)
        return save_slice_info_def
      else:
        return None
# Install the Tensor operator overloads (+, *, slicing, ...) on Variable at
# module-import time so instances behave like tensors in expressions.
Variable._OverloadAllOperators()  # pylint: disable=protected-access
def _try_guard_against_uninitialized_dependencies(name, initial_value):
  """Attempt to guard against dependencies on uninitialized variables.

  Replace references to variables in `initial_value` with references to the
  variable's initialized values. The initialized values are essentially
  conditional TensorFlow graphs that return a variable's value if it is
  initialized or its `initial_value` if it hasn't been initialized. This
  replacement is done on a best effort basis:

  - If the `initial_value` graph contains cycles, we don't do any
    replacements for that graph.
  - If the variables that `initial_value` depends on are not present in the
    `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.

  In these cases, it is up to the caller to ensure that the `initial_value`
  graph uses initialized variables or that they guard access to variables
  using their `initialized_value` method.

  Args:
    name: Variable name.
    initial_value: `Tensor`. The initial value.

  Returns:
    A `Tensor` suitable to initialize a variable.

  Raises:
    TypeError: If `initial_value` is not a `Tensor`.
  """
  if not isinstance(initial_value, tensor_lib.Tensor):
    raise TypeError("initial_value needs to be a Tensor: %s" % initial_value)

  # The rewrite below assumes a DAG; bail out on cyclic dependency graphs.
  if _has_cycle(initial_value.op, state={}):
    return initial_value

  return _safe_initial_value_from_tensor(name, initial_value, op_cache={})
# DFS visitation states used by `_has_cycle`: never visited, currently on the
# recursion stack, and fully explored.
_UNKNOWN, _STARTED, _FINISHED = range(3)
def _has_cycle(op, state):
  """Detect cycles in the dependencies of `initial_value`."""
  status = state.get(op.name, _UNKNOWN)
  if status != _UNKNOWN:
    # Revisiting an op that is still on the DFS stack means a back edge,
    # i.e. a cycle; a finished op is cycle-free.
    return status == _STARTED

  state[op.name] = _STARTED
  dependencies = itertools.chain((inp.op for inp in op.inputs),
                                 op.control_inputs)
  if any(_has_cycle(dep, state) for dep in dependencies):
    return True
  state[op.name] = _FINISHED
  return False
def _safe_initial_value_from_tensor(name, tensor, op_cache):
  """Replace dependencies on variables with their initialized values.

  Args:
    name: Variable name.
    tensor: A `Tensor`. The tensor to replace.
    op_cache: A dict mapping operation names to `Operation`s. Used to memoize
      the results so as to avoid creating redundant operations.

  Returns:
    A `Tensor` compatible with `tensor`. Any inputs that lead to variable
    values will be replaced with a corresponding graph that uses the
    variable's initialized values. This is done on a best-effort basis. If no
    modifications need to be made then `tensor` will be returned unchanged.
  """
  producer = tensor.op
  # Memoize per producing op so shared subgraphs are rewritten only once.
  rewritten = op_cache.get(producer.name)
  if rewritten is None:
    rewritten = _safe_initial_value_from_op(name, producer, op_cache)
    op_cache[producer.name] = rewritten
  # Pick the output slot corresponding to the original tensor.
  return rewritten.outputs[tensor.value_index]
def _safe_initial_value_from_op(name, op, op_cache):
  """Replace dependencies on variables with their initialized values.

  Args:
    name: Variable name.
    op: An `Operation`. The operation to replace.
    op_cache: A dict mapping operation names to `Operation`s. Used to memoize
      the results so as to avoid creating redundant operations.

  Returns:
    An `Operation` compatible with `op`. Any inputs that lead to variable
    values will be replaced with a corresponding graph that uses the
    variable's initialized values. This is done on a best-effort basis. If no
    modifications need to be made then `op` will be returned unchanged.
  """
  op_type = op.node_def.op
  # Ops that already read/test initialization state are left untouched; so is
  # `If`, whose branches must not be rewritten independently.
  if op_type in ("IsVariableInitialized", "VarIsInitializedOp",
                 "ReadVariableOp", "If"):
    return op

  # Attempt to find the initialized_value of any variable reference / handles.
  # TODO(b/70206927): Fix handling of ResourceVariables.
  if op_type in ("Variable", "VariableV2", "VarHandleOp"):
    initialized_value = _find_initialized_value_for_variable(op)
    return op if initialized_value is None else initialized_value.op

  # Recursively build initializer expressions for inputs.
  modified = False
  new_op_inputs = []
  for op_input in op.inputs:
    new_op_input = _safe_initial_value_from_tensor(name, op_input, op_cache)
    new_op_inputs.append(new_op_input)
    modified = modified or (new_op_input != op_input)

  # If at least one input was modified, replace the op.
  if modified:
    new_op_type = op_type
    # RefSwitch expects a ref input; the rewritten input is a value, so the
    # value-based Switch must be used instead.
    if new_op_type == "RefSwitch":
      new_op_type = "Switch"
    # Suffix with the variable name (":" is illegal in op names) to keep the
    # cloned op's name unique per initialized variable.
    new_op_name = op.node_def.name + "_" + name
    new_op_name = new_op_name.replace(":", "_")
    return op.graph.create_op(
        new_op_type,
        new_op_inputs,
        op._output_types,  # pylint: disable=protected-access
        name=new_op_name,
        attrs=op.node_def.attr)

  return op
def _find_initialized_value_for_variable(variable_op):
  """Find the initialized value for a variable op.

  To do so, lookup the variable op in the variables collection.

  Args:
    variable_op: A variable `Operation`.

  Returns:
    A `Tensor` representing the initialized value for the variable or `None`
    if the initialized value could not be found.
  """
  try:
    # Match either the bare op name or its first-output tensor name.
    node_name = variable_op.node_def.name
    candidate_names = (node_name, node_name + ":0")
    for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,
                            ops.GraphKeys.LOCAL_VARIABLES):
      for var in variable_op.graph.get_collection(collection_name):
        if var.name in candidate_names:
          return var.initialized_value()
  except AttributeError:
    # An incomplete user-defined variable type in the collection lacks the
    # attributes touched above; treat it as "not found".
    return None
  return None
| Variable |
python | ray-project__ray | python/ray/serve/_private/application_state.py | {
"start": 44357,
"end": 70793
} | class ____:
    def __init__(
        self,
        deployment_state_manager: DeploymentStateManager,
        autoscaling_state_manager: AutoscalingStateManager,
        endpoint_state: EndpointState,
        kv_store: KVStoreBase,
        logging_config: LoggingConfig,
    ):
        """Initialize the manager and recover any checkpointed applications.

        Args:
            deployment_state_manager: Shared manager for per-deployment state.
            autoscaling_state_manager: Shared manager for autoscaling state.
            endpoint_state: Shared endpoint/routing state.
            kv_store: Controller KV store used for checkpointing.
            logging_config: Logging configuration passed to each application.
        """
        self._deployment_state_manager = deployment_state_manager
        self._autoscaling_state_manager = autoscaling_state_manager
        self._endpoint_state = endpoint_state
        self._kv_store = kv_store
        self._logging_config = logging_config
        # Set to True on shutdown(); suppresses further checkpoint writes.
        self._shutting_down = False

        self._application_states: Dict[str, ApplicationState] = {}
        # Must run last: recovery populates _application_states using the
        # attributes assigned above.
        self._recover_from_checkpoint()
def _recover_from_checkpoint(self):
checkpoint = self._kv_store.get(CHECKPOINT_KEY)
if checkpoint is not None:
application_state_info = cloudpickle.loads(checkpoint)
for app_name, checkpoint_data in application_state_info.items():
app_state = ApplicationState(
app_name,
self._deployment_state_manager,
self._autoscaling_state_manager,
self._endpoint_state,
self._logging_config,
checkpoint_data.external_scaler_enabled,
)
app_state.recover_target_state_from_checkpoint(checkpoint_data)
self._application_states[app_name] = app_state
def delete_app(self, name: str) -> None:
"""Delete application by name"""
if name not in self._application_states:
return
self._application_states[name].delete()
    def deploy_apps(
        self,
        name_to_deployment_args: Dict[str, List[Dict]],
        name_to_application_args: Dict[str, ApplicationArgsProto],
    ) -> None:
        """Deploy (or redeploy) a batch of applications imperatively.

        Validates route-prefix uniqueness across live apps and the batch
        itself, then creates/updates an ApplicationState per app.

        Args:
            name_to_deployment_args: app name -> list of per-deployment
                deploy-arg dicts (as accepted by deploy_args_to_deployment_info).
            name_to_application_args: app name -> application-level args.
                NOTE(review): every key of name_to_deployment_args appears to be
                required here too — a missing entry makes .get(name) return None
                and the attribute access below raise; confirm callers guarantee
                this.

        Raises:
            RayServeException: If a route prefix is already used by a
                different live application.
        """
        # Snapshot of prefixes currently in use by non-deleting apps.
        live_route_prefixes: Dict[str, str] = {
            app_state.route_prefix: app_name
            for app_name, app_state in self._application_states.items()
            if app_state.route_prefix is not None
            and not app_state.status == ApplicationStatus.DELETING
        }

        for name, deployment_args in name_to_deployment_args.items():
            for deploy_param in deployment_args:
                # Make sure route_prefix is not being used by other application.
                deploy_app_prefix = deploy_param.get("route_prefix")
                if deploy_app_prefix is None:
                    continue

                existing_app_name = live_route_prefixes.get(deploy_app_prefix)
                # It's ok to redeploy an app with the same prefix
                # if it has the same name as the app already using that prefix.
                if existing_app_name is not None and existing_app_name != name:
                    raise RayServeException(
                        f"Prefix {deploy_app_prefix} is being used by application "
                        f'"{existing_app_name}". Failed to deploy application "{name}".'
                    )

                # We might be deploying more than one app,
                # so we need to add this app's prefix to the
                # set of live route prefixes that we're checking
                # against during this batch operation.
                live_route_prefixes[deploy_app_prefix] = name

            application_args = name_to_application_args.get(name)
            external_scaler_enabled = application_args.external_scaler_enabled
            if name not in self._application_states:
                self._application_states[name] = ApplicationState(
                    name,
                    self._deployment_state_manager,
                    self._autoscaling_state_manager,
                    self._endpoint_state,
                    self._logging_config,
                    external_scaler_enabled,
                )
            ServeUsageTag.NUM_APPS.record(str(len(self._application_states)))

            deployment_infos = {
                params["deployment_name"]: deploy_args_to_deployment_info(
                    **params, app_name=name
                )
                for params in deployment_args
            }
            self._application_states[name].deploy_app(
                deployment_infos, external_scaler_enabled
            )
    def deploy_app(
        self,
        name: str,
        deployment_args: List[Dict],
        application_args: ApplicationArgsProto,
    ) -> None:
        """Deploy the specified app to the list of deployment arguments.

        This function should only be called if the app is being deployed
        through serve.run instead of from a config.

        Args:
            name: application name
            deployment_args: arguments for deploying a list of deployments.
            application_args: application arguments.
        """
        # Single-app convenience wrapper over the batch API.
        self.deploy_apps({name: deployment_args}, {name: application_args})
    def apply_app_configs(
        self,
        app_configs: List[ServeApplicationSchema],
        *,
        deployment_time: float = 0,
        target_capacity: Optional[float] = None,
        target_capacity_direction: Optional[TargetCapacityDirection] = None,
    ):
        """Declaratively apply the list of application configs.

        The applications will be reconciled to match the target state of the config.

        Any applications previously deployed declaratively that are *not* present in
        the list will be deleted.

        Args:
            app_configs: Full desired set of declaratively-managed apps.
            deployment_time: Timestamp to record for this deployment.
            target_capacity: Optional cluster-wide capacity fraction applied
                to each app config.
            target_capacity_direction: Direction of an in-progress
                target-capacity change.
        """
        for app_config in app_configs:
            if app_config.name not in self._application_states:
                logger.info(f"Deploying new app '{app_config.name}'.")
                self._application_states[app_config.name] = ApplicationState(
                    app_config.name,
                    self._deployment_state_manager,
                    self._autoscaling_state_manager,
                    endpoint_state=self._endpoint_state,
                    logging_config=self._logging_config,
                    external_scaler_enabled=app_config.external_scaler_enabled,
                )

            self._application_states[app_config.name].apply_app_config(
                app_config,
                target_capacity,
                target_capacity_direction,
                deployment_time=deployment_time,
            )

        # Delete all apps that were previously deployed via the declarative API
        # but are not in the config being applied.
        existing_apps = {
            name
            for name, app_state in self._application_states.items()
            if app_state.api_type == APIType.DECLARATIVE
        }
        apps_in_config = {app_config.name for app_config in app_configs}
        for app_to_delete in existing_apps - apps_in_config:
            self.delete_app(app_to_delete)

        ServeUsageTag.NUM_APPS.record(str(len(self._application_states)))
def get_deployments(self, app_name: str) -> List[str]:
"""Return all deployment names by app name"""
if app_name not in self._application_states:
return []
return self._application_states[app_name].target_deployments
    def get_deployments_statuses(self, app_name: str) -> List[DeploymentStatusInfo]:
        """Return all deployment statuses by app name (empty list if unknown)."""
        if app_name not in self._application_states:
            return []
        return self._application_states[app_name].get_deployments_statuses()

    def get_app_status(self, name: str) -> ApplicationStatus:
        # Unknown apps report NOT_STARTED rather than raising.
        if name not in self._application_states:
            return ApplicationStatus.NOT_STARTED

        return self._application_states[name].status
    def does_app_exist(self, name: str) -> bool:
        """Return True if an application with this name is being tracked."""
        return name in self._application_states

    def get_app_status_info(self, name: str) -> ApplicationStatusInfo:
        """Return full status info for an app; NOT_STARTED stub if unknown."""
        if name not in self._application_states:
            return ApplicationStatusInfo(
                ApplicationStatus.NOT_STARTED,
                message=f"Application {name} doesn't exist",
                deployment_timestamp=0,
            )
        return self._application_states[name].get_application_status_info()
    # NOTE(review): unlike the getters above, get_docs_path / get_route_prefix /
    # get_app_source raise KeyError for unknown app names — confirm callers
    # always check does_app_exist() first.
    def get_docs_path(self, app_name: str) -> Optional[str]:
        """Return the OpenAPI docs path for the app, if it exposes one."""
        return self._application_states[app_name].docs_path

    def get_route_prefix(self, name: str) -> Optional[str]:
        """Return the HTTP route prefix for the app (may be None)."""
        return self._application_states[name].route_prefix

    def get_ingress_deployment_name(self, name: str) -> Optional[str]:
        """Return the app's ingress deployment name, or None if unknown."""
        if name not in self._application_states:
            return None

        return self._application_states[name].ingress_deployment

    def get_app_source(self, name: str) -> APIType:
        """Return which API (imperative/declarative) deployed the app."""
        return self._application_states[name].api_type
def get_external_scaler_enabled(self, app_name: str) -> bool:
"""Check if external scaler is enabled for the application.
Args:
app_name: Name of the application.
Returns:
True if external_scaler_enabled is set for the application, False otherwise.
"""
return (
self.does_app_exist(app_name)
and self._application_states[app_name].external_scaler_enabled
)
def list_app_statuses(
self, source: Optional[APIType] = None
) -> Dict[str, ApplicationStatusInfo]:
"""Return a dictionary with {app name: application info}
Args:
source: Optional API type filter. If provided, only returns apps
deployed via the specified API type.
Returns:
Dict[str, ApplicationStatusInfo]: A dictionary mapping application names
to their corresponding status information.
"""
if source is None:
return {
name: self._application_states[name].get_application_status_info()
for name in self._application_states
}
else:
return {
name: self._application_states[name].get_application_status_info()
for name in self._application_states
if self.get_app_source(name) is source
}
    def list_deployment_details(self, name: str) -> Dict[str, DeploymentDetails]:
        """Gets detailed info on all deployments in specified application."""
        # Unknown apps yield an empty mapping rather than raising.
        if name not in self._application_states:
            return {}
        return self._application_states[name].list_deployment_details()

    def get_deployment_topology(self, app_name: str) -> Optional[DeploymentTopology]:
        """Get the deployment topology for an application.

        Args:
            app_name: Name of the application.

        Returns:
            The deployment topology for the application, or None if the application
            doesn't exist or the topology hasn't been built yet.
        """
        if app_name not in self._application_states:
            return None
        return self._application_states[app_name].get_deployment_topology()
    def update(self):
        """Update each application state.

        One reconciliation tick: autoscale where applicable, advance each
        app's state machine, drop fully-deleted apps, and checkpoint if any
        target state changed.
        """
        apps_to_be_deleted = []
        any_target_state_changed = False
        for name, app in self._application_states.items():
            if app.should_autoscale():
                # Keep autoscale() first in the `or` so it is always invoked.
                any_target_state_changed = app.autoscale() or any_target_state_changed
            ready_to_be_deleted, app_target_state_changed = app.update()
            any_target_state_changed = (
                any_target_state_changed or app_target_state_changed
            )
            if ready_to_be_deleted:
                # Deferred: can't mutate the dict while iterating it.
                apps_to_be_deleted.append(name)
                logger.debug(f"Application '{name}' deleted successfully.")

        if len(apps_to_be_deleted) > 0:
            for app_name in apps_to_be_deleted:
                self._autoscaling_state_manager.deregister_application(app_name)
                del self._application_states[app_name]
            ServeUsageTag.NUM_APPS.record(str(len(self._application_states)))

        if any_target_state_changed:
            # Persist both layers together so recovery sees a consistent view.
            self.save_checkpoint()
            self._deployment_state_manager.save_checkpoint()
    def shutdown(self) -> None:
        """Begin deleting all applications and drop the checkpoint.

        Sets the shutting-down flag first so no further checkpoints are
        written while teardown proceeds.
        """
        self._shutting_down = True

        for app_state in self._application_states.values():
            app_state.delete()

        self._kv_store.delete(CHECKPOINT_KEY)
def is_ready_for_shutdown(self) -> bool:
"""Return whether all applications have shut down.
Iterate through all application states and check if all their applications
are deleted.
"""
return self._shutting_down and all(
app_state.is_deleted() for app_state in self._application_states.values()
)
    def save_checkpoint(self) -> None:
        """Write a checkpoint of all application states."""
        if self._shutting_down:
            # Once we're told to shut down, stop writing checkpoints.
            # Calling .shutdown() deletes any existing checkpoint.
            return

        # Serialize every app's target state under a single KV key.
        application_state_info = {
            app_name: app_state.get_checkpoint_data()
            for app_name, app_state in self._application_states.items()
        }

        self._kv_store.put(
            CHECKPOINT_KEY,
            cloudpickle.dumps(application_state_info),
        )
@ray.remote(num_cpus=0, max_calls=1)
def build_serve_application(
import_path: str,
code_version: str,
name: str,
args: Dict,
logging_config: LoggingConfig,
application_autoscaling_policy_function: Optional[str],
deployment_to_autoscaling_policy_function: Dict[str, str],
deployment_to_request_router_cls: Dict[str, str],
) -> Tuple[Optional[bytes], Optional[List[Dict]], Optional[str]]:
"""Import and build a Serve application.
Args:
import_path: import path to top-level bound deployment.
code_version: code version inferred from app config. All
deployment versions are set to this code version.
name: application name. If specified, application will be deployed
without removing existing applications.
args: Arguments to be passed to the application builder.
logging_config: the logging config for the build app task.
application_autoscaling_policy_function: the application autoscaling policy function name
deployment_to_autoscaling_policy_function: a dictionary mapping deployment names to autoscaling policy function names
deployment_to_request_router_cls: a dictionary mapping deployment names to request router class names
Returns:
Serialized application autoscaling policy def: a serialized autoscaling
policy def for the application if it was built successfully, otherwise None.
Deploy arguments: a list of deployment arguments if application
was built successfully, otherwise None.
Error message: a string if an error was raised, otherwise None.
"""
configure_component_logger(
component_name="controller",
component_id=f"build_{name}_{os.getpid()}",
logging_config=logging_config,
)
try:
from ray.serve._private.api import call_user_app_builder_with_args_if_necessary
# Import and build the application.
args_info_str = f" with arguments {args}" if args else ""
logger.info(f"Importing application '{name}'{args_info_str}.")
app = call_user_app_builder_with_args_if_necessary(
import_attr(import_path), args
)
deploy_args_list = []
built_app: BuiltApplication = build_app(
app,
name=name,
default_runtime_env=ray.get_runtime_context().runtime_env,
)
num_ingress_deployments = 0
def _get_serialized_def(attr_path: str) -> bytes:
module, attr = import_module_and_attr(attr_path)
cloudpickle.register_pickle_by_value(module)
serialized = cloudpickle.dumps(attr)
cloudpickle.unregister_pickle_by_value(module)
return serialized
application_serialized_autoscaling_policy_def = None
if application_autoscaling_policy_function is not None:
application_serialized_autoscaling_policy_def = _get_serialized_def(
application_autoscaling_policy_function
)
for deployment in built_app.deployments:
if inspect.isclass(deployment.func_or_class) and issubclass(
deployment.func_or_class, ASGIAppReplicaWrapper
):
num_ingress_deployments += 1
is_ingress = deployment.name == built_app.ingress_deployment_name
deployment_to_serialized_autoscaling_policy_def = None
deployment_to_serialized_request_router_cls = None
if deployment.name in deployment_to_autoscaling_policy_function:
deployment_to_serialized_autoscaling_policy_def = _get_serialized_def(
deployment_to_autoscaling_policy_function[deployment.name]
)
if deployment.name in deployment_to_request_router_cls:
deployment_to_serialized_request_router_cls = _get_serialized_def(
deployment_to_request_router_cls[deployment.name]
)
deploy_args_list.append(
get_deploy_args(
name=deployment._name,
replica_config=deployment._replica_config,
ingress=is_ingress,
deployment_config=deployment._deployment_config,
version=code_version,
route_prefix="/" if is_ingress else None,
serialized_autoscaling_policy_def=deployment_to_serialized_autoscaling_policy_def,
serialized_request_router_cls=deployment_to_serialized_request_router_cls,
)
)
if num_ingress_deployments > 1:
return (
None,
None,
(
f'Found multiple FastAPI deployments in application "{built_app.name}". '
"Please only include one deployment with @serve.ingress "
"in your application to avoid this issue."
),
)
return application_serialized_autoscaling_policy_def, deploy_args_list, None
except KeyboardInterrupt:
# Error is raised when this task is canceled with ray.cancel(), which
# happens when deploy_apps() is called.
logger.info(
"Existing config deployment request terminated because of keyboard "
"interrupt."
)
return None, None, None
except Exception:
logger.error(
f"Exception importing application '{name}'.\n{traceback.format_exc()}"
)
return None, None, traceback.format_exc()
def override_deployment_info(
deployment_infos: Dict[str, DeploymentInfo],
override_config: Optional[ServeApplicationSchema],
deployment_to_serialized_autoscaling_policy_def: Optional[Dict[str, bytes]] = None,
deployment_to_serialized_request_router_cls: Optional[Dict[str, bytes]] = None,
) -> Dict[str, DeploymentInfo]:
"""Override deployment infos with options from app config.
Args:
app_name: application name
deployment_infos: deployment info loaded from code
override_config: application config deployed by user with
options to override those loaded from code.
deployment_to_serialized_autoscaling_policy_def: serialized autoscaling policy def for each deployment
deployment_to_serialized_request_router_cls: serialized request router cls for each deployment
Returns: the updated deployment infos.
Raises:
ValueError: If config options have invalid values.
TypeError: If config options have invalid types.
"""
deployment_infos = deepcopy(deployment_infos)
if override_config is None:
return deployment_infos
config_dict = override_config.dict(exclude_unset=True)
deployment_override_options = config_dict.get("deployments", [])
# Override options for each deployment listed in the config.
for options in deployment_override_options:
if "max_ongoing_requests" in options:
options["max_ongoing_requests"] = options.get("max_ongoing_requests")
deployment_name = options["name"]
if deployment_name not in deployment_infos:
raise ValueError(
f"Deployment '{deployment_name}' does not exist. "
f"Available: {list(deployment_infos.keys())}"
)
info = deployment_infos[deployment_name]
original_options = info.deployment_config.dict()
original_options["user_configured_option_names"].update(set(options))
# Override `max_ongoing_requests` and `autoscaling_config` if
# `num_replicas="auto"`
if options.get("num_replicas") == "auto":
options["num_replicas"] = None
new_config = AutoscalingConfig.default().dict()
# If `autoscaling_config` is specified, its values override
# the default `num_replicas="auto"` configuration
autoscaling_config = (
options.get("autoscaling_config")
or info.deployment_config.autoscaling_config
)
if autoscaling_config:
new_config.update(autoscaling_config)
if (
deployment_to_serialized_autoscaling_policy_def
and deployment_name in deployment_to_serialized_autoscaling_policy_def
):
# By setting the serialized policy def, AutoscalingConfig constructor will not
# try to import the policy from the string import path
policy_obj = AutoscalingPolicy.from_serialized_policy_def(
new_config["policy"],
deployment_to_serialized_autoscaling_policy_def[deployment_name],
)
new_config["policy"] = policy_obj
options["autoscaling_config"] = AutoscalingConfig(**new_config)
ServeUsageTag.AUTO_NUM_REPLICAS_USED.record("1")
# What to pass to info.update
override_options = {}
# Merge app-level and deployment-level runtime_envs.
replica_config = info.replica_config
app_runtime_env = override_config.runtime_env
if "ray_actor_options" in options:
# If specified, get ray_actor_options from config
override_actor_options = options.pop("ray_actor_options", {})
else:
# Otherwise, get options from application code (and default to {}
# if the code sets options to None).
override_actor_options = replica_config.ray_actor_options or {}
override_placement_group_bundles = options.pop(
"placement_group_bundles", replica_config.placement_group_bundles
)
override_placement_group_strategy = options.pop(
"placement_group_strategy", replica_config.placement_group_strategy
)
override_max_replicas_per_node = options.pop(
"max_replicas_per_node", replica_config.max_replicas_per_node
)
# Record telemetry for container runtime env feature at deployment level
if override_actor_options.get("runtime_env") and (
override_actor_options["runtime_env"].get("container")
or override_actor_options["runtime_env"].get("image_uri")
):
ServeUsageTag.DEPLOYMENT_CONTAINER_RUNTIME_ENV_USED.record("1")
merged_env = override_runtime_envs_except_env_vars(
app_runtime_env, override_actor_options.get("runtime_env", {})
)
override_actor_options.update({"runtime_env": merged_env})
replica_config.update(
ray_actor_options=override_actor_options,
placement_group_bundles=override_placement_group_bundles,
placement_group_strategy=override_placement_group_strategy,
max_replicas_per_node=override_max_replicas_per_node,
)
override_options["replica_config"] = replica_config
if "request_router_config" in options:
request_router_config = options.get("request_router_config")
if request_router_config:
if (
deployment_to_serialized_request_router_cls
and deployment_name in deployment_to_serialized_request_router_cls
):
# By setting the serialized request router cls, RequestRouterConfig constructor will not
# try to import the request router cls from the string import path
options[
"request_router_config"
] = RequestRouterConfig.from_serialized_request_router_cls(
request_router_config,
deployment_to_serialized_request_router_cls[deployment_name],
)
else:
options["request_router_config"] = RequestRouterConfig(
**request_router_config
)
# Override deployment config options
options.pop("name", None)
original_options.update(options)
override_options["deployment_config"] = DeploymentConfig(**original_options)
deployment_infos[deployment_name] = info.update(**override_options)
deployment_config = deployment_infos[deployment_name].deployment_config
if (
deployment_config.autoscaling_config is not None
and deployment_config.max_ongoing_requests
< deployment_config.autoscaling_config.get_target_ongoing_requests()
):
logger.warning(
"Autoscaling will never happen, "
"because 'max_ongoing_requests' is less than "
"'target_ongoing_requests' now."
)
# Overwrite ingress route prefix
app_route_prefix = config_dict.get("route_prefix", DEFAULT.VALUE)
validate_route_prefix(app_route_prefix)
for deployment in list(deployment_infos.values()):
if (
app_route_prefix is not DEFAULT.VALUE
and deployment.route_prefix is not None
):
deployment.route_prefix = app_route_prefix
return deployment_infos
| ApplicationStateManager |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/annotations/method_annotation.py | {
"start": 216,
"end": 643
} | class ____:
def eat(self, food: Banana | Coconut) -> None:
print(f"Monkey eats {food}")
def munch(self, food: Union[Leaf, Insect]) -> None:
print(f"Monkey munches {food}")
def jump(self, height: Optional[int] = 10) -> None:
print(f"Monkey jumps {height}")
def scream(self, volume: int | None) -> Sound:
if volume is None:
volume = 0
return Sound(volume)
| Monkey |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/gcs.py | {
"start": 1615,
"end": 5833
} | class ____(BaseSensorOperator):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket: The Google Cloud Storage bucket where the object is.
:param object: The name of the object to check in the Google cloud
storage bucket.
:param use_glob: When set to True the object parameter is interpreted as glob
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: (Optional) How to retry the RPC
"""
template_fields: Sequence[str] = (
"bucket",
"object",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
bucket: str,
object: str,
use_glob: bool = False,
google_cloud_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
retry: Retry = DEFAULT_RETRY,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.object = object
self.use_glob = use_glob
self.google_cloud_conn_id = google_cloud_conn_id
self._matches: bool = False
self.impersonation_chain = impersonation_chain
self.retry = retry
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
self.log.info("Sensor checks existence of : %s, %s", self.bucket, self.object)
hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
self._matches = (
bool(hook.list(self.bucket, match_glob=self.object))
if self.use_glob
else hook.exists(self.bucket, self.object, self.retry)
)
return self._matches
def execute(self, context: Context):
"""Airflow runs this method on the worker and defers using the trigger."""
if self.deferrable:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=GCSBlobTrigger(
bucket=self.bucket,
object_name=self.object,
use_glob=self.use_glob,
poke_interval=self.poke_interval,
google_cloud_conn_id=self.google_cloud_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
else:
super().execute(context)
return self._matches
def execute_complete(self, context: Context, event: dict[str, str]) -> bool:
"""
Act as a callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info("File %s was found in bucket %s.", self.object, self.bucket)
return True
def ts_function(context):
"""
Act as a default callback for the GoogleCloudStorageObjectUpdatedSensor.
The default behaviour is check for the object being updated after the data
interval's end.
"""
return context["data_interval_end"]
| GCSObjectExistenceSensor |
python | pandas-dev__pandas | pandas/tests/series/methods/test_astype.py | {
"start": 18963,
"end": 25741
} | class ____:
def test_astype_categorical_to_other(self):
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
expected = ser
tm.assert_series_equal(ser.astype("category"), expected)
tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)
msg = r"Cannot cast object|str dtype to float64"
with pytest.raises(ValueError, match=msg):
ser.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"], dtype="str")
tm.assert_series_equal(cat.astype("str"), exp)
s2 = Series(Categorical(["1", "2", "3", "4"]))
exp2 = Series([1, 2, 3, 4]).astype("int")
tm.assert_series_equal(s2.astype("int"), exp2)
# object don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(ser.values), name="value_group")
cmp(ser.astype("object"), expected)
cmp(ser.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(ser), np.array(ser.values))
tm.assert_series_equal(ser.astype("category"), ser)
tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)
roundtrip_expected = ser.cat.set_categories(
ser.cat.categories.sort_values()
).cat.remove_unused_categories()
result = ser.astype("object").astype("category")
tm.assert_series_equal(result, roundtrip_expected)
result = ser.astype("object").astype(CategoricalDtype())
tm.assert_series_equal(result, roundtrip_expected)
def test_astype_categorical_invalid_conversions(self):
# invalid conversion (these are NOT a dtype)
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
msg = "dtype '<class 'pandas.Categorical'>' not understood"
with pytest.raises(TypeError, match=msg):
ser.astype(Categorical)
with pytest.raises(TypeError, match=msg):
ser.astype("object").astype(Categorical)
def test_astype_categoricaldtype(self):
ser = Series(["a", "b", "a"])
result = ser.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = ser.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = ser.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH#10696, GH#18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
ser = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = ser.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = Series(s_data, name=name, dtype=exp_dtype)
tm.assert_series_equal(result, expected)
# different categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
msg = "Constructing a Categorical with a dtype and values containing"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = ser.astype(dtype)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected = Series(s_data, name=name, dtype=dtype)
tm.assert_series_equal(result, expected)
if dtype_ordered is False:
# not specifying ordered, so only test once
expected = ser
result = ser.astype("category")
tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
ser = Series([True, False, np.nan])
assert ser.dtypes == np.object_
result = ser.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categories_raises(self):
# deprecated GH#17636, removed in GH#27141
ser = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
ser.astype("category", categories=["a", "b"], ordered=True)
@pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]])
def test_astype_from_categorical(self, items):
ser = Series(items)
exp = Series(Categorical(items))
res = ser.astype("category")
tm.assert_series_equal(res, exp)
def test_astype_from_categorical_with_keywords(self):
# with keywords
lst = ["a", "b", "c", "a"]
ser = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = ser.astype(CategoricalDtype(None, ordered=True))
tm.assert_series_equal(res, exp)
exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
res = ser.astype(CategoricalDtype(list("abcdef"), ordered=True))
tm.assert_series_equal(res, exp)
def test_astype_timedelta64_with_np_nan(self):
# GH45798
result = Series([Timedelta(1), np.nan], dtype="timedelta64[ns]")
expected = Series([Timedelta(1), NaT], dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected)
@td.skip_if_no("pyarrow")
def test_astype_int_na_string(self):
# GH#57418
ser = Series([12, NA], dtype="Int64[pyarrow]")
result = ser.astype("string[pyarrow]")
expected = Series(["12", NA], dtype="string[pyarrow]")
tm.assert_series_equal(result, expected)
| TestAstypeCategorical |
python | python-poetry__poetry | tests/conftest.py | {
"start": 5837,
"end": 6418
} | class ____(KeyringBackend):
@properties.classproperty
def priority(self) -> float:
return 42
def set_password(self, service: str, username: str, password: str) -> None:
raise KeyringLocked()
def get_password(self, service: str, username: str) -> str | None:
raise KeyringLocked()
def get_credential(
self,
service: str,
username: str | None,
) -> Credential | None:
raise KeyringLocked()
def delete_password(self, service: str, username: str) -> None:
raise KeyringLocked()
| LockedBackend |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/type_annotations_test.py | {
"start": 887,
"end": 1074
} | class ____(reference_test_base.TestCase):
def test_pure_declaration(self):
self.assertFunctionMatchesEager(pure_declaration)
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | doocs__leetcode | lcof/面试题59 - I. 滑动窗口的最大值/Solution.py | {
"start": 0,
"end": 402
} | class ____:
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
q = deque()
ans = []
for i, x in enumerate(nums):
if q and i - q[0] + 1 > k:
q.popleft()
while q and nums[q[-1]] <= x:
q.pop()
q.append(i)
if i >= k - 1:
ans.append(nums[q[0]])
return ans
| Solution |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/tests/test_vector_stores_vectorx.py | {
"start": 7052,
"end": 8838
} | class ____(VectorXTestSetup):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.embed_model = HuggingFaceEmbedding(
"sentence-transformers/all-MiniLM-L6-v2", device="cpu"
)
cls.vector_store = VectorXVectorStore.from_params(
api_token=cls.vecx_api_token,
index_name=cls.test_index_name,
encryption_key=cls.encryption_key,
dimension=cls.dimension,
space_type=cls.space_type,
)
cls.storage_context = StorageContext.from_defaults(
vector_store=cls.vector_store
)
Settings.llm = None
cls.index = VectorStoreIndex.from_documents(
cls.test_documents,
storage_context=cls.storage_context,
embed_model=cls.embed_model,
)
def test_basic_query(self):
query_text = "What is Python?"
query_embedding = self.embed_model.get_text_embedding(query_text)
query = VectorStoreQuery(query_embedding=query_embedding, similarity_top_k=2)
results = self.vector_store.query(query)
self.assertGreater(len(results.nodes), 0)
def test_filtered_query(self):
query_text = "Explain machine learning"
query_embedding = self.embed_model.get_text_embedding(query_text)
ai_filter = MetadataFilter(
key="category", value="ai", operator=FilterOperator.EQ
)
query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=2,
filters=MetadataFilters(filters=[ai_filter]),
)
results = self.vector_store.query(query)
self.assertGreater(len(results.nodes), 0)
# ------------------ Mocked VectorX Tests ------------------
| TestQueryAndFilter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format22.py | {
"start": 315,
"end": 1624
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format22.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [108321024, 108328448]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"border": {"color": "yellow"},
"fill": {"color": "red", "transparency": 1},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/admin_views/models.py | {
"start": 836,
"end": 2014
} | class ____(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True)
another_section = models.ForeignKey(
Section, models.CASCADE, null=True, blank=True, related_name="+"
)
sub_section = models.ForeignKey(
Section, models.SET_NULL, null=True, blank=True, related_name="+"
)
def __str__(self):
return self.title
@admin.display(ordering="date", description="")
def model_year(self):
return self.date.year
@admin.display(ordering="-date", description="")
def model_year_reversed(self):
return self.date.year
@property
@admin.display(ordering="date")
def model_property_year(self):
return self.date.year
@property
def model_month(self):
return self.date.month
@property
@admin.display(description="Is from past?", boolean=True)
def model_property_is_from_past(self):
return self.date < timezone.now()
| Article |
python | paramiko__paramiko | tests/test_config.py | {
"start": 15326,
"end": 17071
} | class ____:
def test_SSHConfigDict_construct_empty(self):
assert not SSHConfigDict()
def test_SSHConfigDict_construct_from_list(self):
assert SSHConfigDict([(1, 2)])[1] == 2
def test_SSHConfigDict_construct_from_dict(self):
assert SSHConfigDict({1: 2})[1] == 2
@mark.parametrize("true_ish", ("yes", "YES", "Yes", True))
def test_SSHConfigDict_as_bool_true_ish(self, true_ish):
assert SSHConfigDict({"key": true_ish}).as_bool("key") is True
@mark.parametrize("false_ish", ("no", "NO", "No", False))
def test_SSHConfigDict_as_bool(self, false_ish):
assert SSHConfigDict({"key": false_ish}).as_bool("key") is False
@mark.parametrize("int_val", ("42", 42))
def test_SSHConfigDict_as_int(self, int_val):
assert SSHConfigDict({"key": int_val}).as_int("key") == 42
@mark.parametrize("non_int", ("not an int", None, object()))
def test_SSHConfigDict_as_int_failures(self, non_int):
conf = SSHConfigDict({"key": non_int})
try:
int(non_int)
except Exception as e:
exception_type = type(e)
with raises(exception_type):
conf.as_int("key")
def test_SSHConfig_host_dicts_are_SSHConfigDict_instances(self):
config = SSHConfig.from_text(
"""
Host *.example.com
Port 2222
Host *
Port 3333
"""
)
assert config.lookup("foo.example.com").as_int("port") == 2222
def test_SSHConfig_wildcard_host_dicts_are_SSHConfigDict_instances(self):
config = SSHConfig.from_text(
"""
Host *.example.com
Port 2222
Host *
Port 3333
"""
)
assert config.lookup("anything-else").as_int("port") == 3333
| TestSSHConfigDict |
python | numba__llvmlite | llvmlite/ir/values.py | {
"start": 31551,
"end": 31861
} | class ____(_BaseArgument):
"""
The specification of a function's return value.
"""
def __str__(self):
attrs = self.attributes._to_list(self.type)
if attrs:
return "{0} {1}".format(' '.join(attrs), self.type)
else:
return str(self.type)
| ReturnValue |
python | ipython__ipython | IPython/core/crashhandler.py | {
"start": 3174,
"end": 8747
} | class ____:
"""Customizable crash handlers for IPython applications.
Instances of this class provide a :meth:`__call__` method which can be
used as a ``sys.excepthook``. The :meth:`__call__` signature is::
def __call__(self, etype, evalue, etb)
"""
message_template = _default_message_template
section_sep = '\n\n'+'*'*75+'\n\n'
info: Dict[str, Optional[str]]
def __init__(
self,
app: Application,
contact_name: Optional[str] = None,
contact_email: Optional[str] = None,
bug_tracker: Optional[str] = None,
show_crash_traceback: bool = True,
call_pdb: bool = False,
):
"""Create a new crash handler
Parameters
----------
app : Application
A running :class:`Application` instance, which will be queried at
crash time for internal information.
contact_name : str
A string with the name of the person to contact.
contact_email : str
A string with the email address of the contact.
bug_tracker : str
A string with the URL for your project's bug tracker.
show_crash_traceback : bool
If false, don't print the crash traceback on stderr, only generate
the on-disk report
call_pdb
Whether to call pdb on crash
Attributes
----------
These instances contain some non-argument attributes which allow for
further customization of the crash handler's behavior. Please see the
source for further details.
"""
self.crash_report_fname = "Crash_report_%s.txt" % app.name
self.app = app
self.call_pdb = call_pdb
#self.call_pdb = True # dbg
self.show_crash_traceback = show_crash_traceback
self.info = dict(app_name = app.name,
contact_name = contact_name,
contact_email = contact_email,
bug_tracker = bug_tracker,
crash_report_fname = self.crash_report_fname)
def __call__(
self,
etype: type[BaseException],
evalue: BaseException,
etb: types.TracebackType,
) -> None:
"""Handle an exception, call for compatible with sys.excepthook"""
# do not allow the crash handler to be called twice without reinstalling it
# this prevents unlikely errors in the crash handling from entering an
# infinite loop.
sys.excepthook = sys.__excepthook__
# Use this ONLY for developer debugging (keep commented out for release)
ipython_dir = getattr(self.app, "ipython_dir", None)
if ipython_dir is not None:
assert isinstance(ipython_dir, str)
rptdir = Path(ipython_dir)
else:
rptdir = Path.cwd()
if not rptdir.is_dir():
rptdir = Path.cwd()
report_name = rptdir / self.crash_report_fname
# write the report filename into the instance dict so it can get
# properly expanded out in the user message template
self.crash_report_fname = str(report_name)
self.info["crash_report_fname"] = str(report_name)
TBhandler = ultratb.VerboseTB(
theme_name="nocolor",
long_header=True,
call_pdb=self.call_pdb,
)
if self.call_pdb:
TBhandler(etype,evalue,etb)
return
else:
traceback = TBhandler.text(etype,evalue,etb,context=31)
# print traceback to screen
if self.show_crash_traceback:
print(traceback, file=sys.stderr)
# and generate a complete report on disk
try:
report = open(report_name, "w", encoding="utf-8")
except:
print('Could not create crash report on disk.', file=sys.stderr)
return
with report:
# Inform user on stderr of what happened
print('\n'+'*'*70+'\n', file=sys.stderr)
print(self.message_template.format(**self.info), file=sys.stderr)
# Construct report on disk
report.write(self.make_report(str(traceback)))
builtin_mod.input("Hit <Enter> to quit (your terminal may close):")
def make_report(self, traceback: str) -> str:
"""Return a string containing a crash report."""
sec_sep = self.section_sep
report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
rpt_add = report.append
rpt_add(sys_info())
try:
config = pformat(self.app.config)
rpt_add(sec_sep)
rpt_add("Application name: %s\n\n" % self.app.name)
rpt_add("Current user configuration structure:\n\n")
rpt_add(config)
except:
pass
rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
return ''.join(report)
def crash_handler_lite(
etype: type[BaseException], evalue: BaseException, tb: types.TracebackType
) -> None:
"""a light excepthook, adding a small message to the usual traceback"""
traceback.print_exception(etype, evalue, tb)
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
# we are in a Shell environment, give %magic example
config = "%config "
else:
# we are not in a shell, show generic config
config = "c."
print(_lite_message_template.format(email=author_email, config=config, version=version), file=sys.stderr)
| CrashHandler |
python | great-expectations__great_expectations | great_expectations/core/expectation_validation_result.py | {
"start": 18516,
"end": 18938
} | class ____(TypedDict):
active_batch_definition: LegacyBatchDefinition
batch_markers: BatchMarkers
batch_parameters: dict | None
batch_spec: BatchSpec
checkpoint_id: Optional[str]
checkpoint_name: str
expectation_suite_name: str
great_expectations_version: str
run_id: RunIdentifier
validation_id: Optional[str]
validation_time: str
@public_api
| ExpectationSuiteValidationResultMeta |
python | ApeWorX__ape | tests/functional/test_exceptions.py | {
"start": 1116,
"end": 1328
} | class ____:
def test_shows_line_number(self):
actual = str(Abort())
expected = re.compile(r"Operation aborted in [\w<>.]*::[\w<>]* on line \d+\.")
assert expected.match(actual)
| TestAbort |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.