language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/1893. Check if All the Integers in a Range Are Covered/1893-2.py | {
"start": 0,
"end": 296
} | class ____:
def isCovered(self, ranges: list[list[int]], left: int, right: int) -> bool:
seen = [0] * 52
for l, r in ranges:
seen[l] += 1
seen[r + 1] -= 1
for i in range(1, 52):
seen[i] += seen[i - 1]
return all(seen[i] for i in range(left, right + 1))
| Solution |
python | modin-project__modin | modin/pandas/groupby.py | {
"start": 70034,
"end": 81483
} | class ____(DataFrameGroupBy): # noqa: GL08
_pandas_class = pandas.core.groupby.SeriesGroupBy
_extensions: EXTENSION_DICT_TYPE = EXTENSION_DICT_TYPE(dict)
@disable_logging
def __getattribute__(self, item: str) -> Any:
"""
Get an attribute of the object.
Python calls this method for every attribute access. We override it to
get extension attributes.
Parameters
----------
item : str
Attribute name.
Returns
-------
Any
The value of the attribute.
"""
if item not in GROUPBY_EXTENSION_NO_LOOKUP:
extensions_result = self._getattribute__from_extension_impl(
item, __class__._extensions
)
if extensions_result is not sentinel:
return extensions_result
return super().__getattribute__(item)
@_inherit_docstrings(QueryCompilerCaster._getattr__from_extension_impl)
def __getattr__(self, key: str) -> Any:
return self._getattr__from_extension_impl(
key=key,
default_behavior_attributes=GROUPBY_EXTENSION_NO_LOOKUP,
extensions=__class__._extensions,
)
@disable_logging
def __setattr__(self, key: str, value: Any) -> None:
"""
Set an attribute of the object.
We override this method to support settable extension attributes.
Parameters
----------
key : str
Attribute name.
value : Any
Value to set the attribute to.
Returns
-------
None
"""
# An extension property is only accessible if the backend supports it.
extension = self._get_extension(key, __class__._extensions)
if extension is not sentinel and hasattr(extension, "__set__"):
return extension.__set__(self, value)
return super().__setattr__(key, value)
@disable_logging
def __delattr__(self, name: str) -> None:
"""
Delete an attribute of the object.
We override this method to support deletable extension attributes.
Parameters
----------
name : str
Attribute name.
Returns
-------
None
"""
# An extension property is only accessible if the backend supports it.
extension = self._get_extension(name, __class__._extensions)
if extension is not sentinel and hasattr(extension, "__delete__"):
return extension.__delete__(self)
return super().__delattr__(name)
@property
def ndim(self):
"""
Return 1.
Returns
-------
int
Returns 1.
Notes
-----
Deprecated and removed in pandas and will be likely removed in Modin.
"""
return 1 # ndim is always 1 for Series
@property
def _iter(self):
"""
Construct a tuple of (group_id, Series) tuples to allow iteration over groups.
Returns
-------
generator
Generator expression of GroupBy object broken down into tuples for iteration.
"""
indices = self.indices
group_ids = indices.keys()
if self._axis == 0:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_row_array(
indices[k]
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
else:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_column_array(
indices[k], numeric=True
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
def _try_get_str_func(self, fn):
"""
Try to convert a groupby aggregation function to a string or list of such.
Parameters
----------
fn : callable, str, or Iterable
Returns
-------
str, list
If `fn` is a callable, return its name, otherwise return `fn` itself.
If `fn` is a string, return it. If `fn` is an Iterable, return a list
of _try_get_str_func applied to each element of `fn`.
"""
if not isinstance(fn, str) and isinstance(fn, Iterable):
return [self._try_get_str_func(f) for f in fn]
return fn.__name__ if callable(fn) else fn
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
): # noqa: GL08
return self._default_to_pandas(
lambda ser: ser.value_counts(
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
dropna=dropna,
)
)
def corr(self, other, method="pearson", min_periods=None):
return self._wrap_aggregation(
type(self._query_compiler).groupby_corr,
agg_kwargs=dict(other=other, method=method, min_periods=min_periods),
)
def cov(self, other, min_periods=None, ddof=1):
return self._wrap_aggregation(
type(self._query_compiler).groupby_cov,
agg_kwargs=dict(other=other, min_periods=min_periods, ddof=ddof),
)
def describe(self, percentiles=None, include=None, exclude=None):
return self._default_to_pandas(
lambda df: df.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
def idxmax(self, axis=lib.no_default, skipna=True):
if axis is not lib.no_default:
axis = self._df._get_axis_number(axis)
self._deprecate_axis(axis, "idxmax")
else:
axis = 0
return self._wrap_aggregation(
type(self._query_compiler).groupby_idxmax,
agg_kwargs=dict(axis=axis, skipna=skipna),
)
def idxmin(self, axis=lib.no_default, skipna=True):
if axis is not lib.no_default:
axis = self._df._get_axis_number(axis)
self._deprecate_axis(axis, "idxmin")
else:
axis = 0
return self._wrap_aggregation(
type(self._query_compiler).groupby_idxmin,
agg_kwargs=dict(axis=axis, skipna=skipna),
)
def hist(
self,
by=None,
ax=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
figsize=None,
bins=10,
backend=None,
legend=False,
**kwargs,
):
return self._default_to_pandas(
lambda df: df.hist(
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
backend=backend,
legend=legend,
**kwargs,
)
)
@property
def is_monotonic_decreasing(self):
return self._default_to_pandas(lambda ser: ser.is_monotonic_decreasing)
@property
def is_monotonic_increasing(self):
return self._default_to_pandas(lambda ser: ser.is_monotonic_increasing)
@property
def dtype(self):
return self._default_to_pandas(lambda ser: ser.dtype)
def unique(self):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_unique,
numeric_only=False,
)
)
def nlargest(self, n=5, keep="first"):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_nlargest,
agg_kwargs=dict(n=n, keep=keep),
numeric_only=True,
)
)
def nsmallest(self, n=5, keep="first"):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_nsmallest,
agg_kwargs=dict(n=n, keep=keep),
numeric_only=True,
)
)
def _validate_func_kwargs(self, kwargs: dict):
"""
Validate types of user-provided "named aggregation" kwargs.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
List of user-provided keys.
funcs : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs.
Raises
------
`TypeError` is raised if aggfunc is not `str` or callable.
Notes
-----
Copied from pandas.
"""
columns = list(kwargs)
funcs = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(
f"func is expected but received {type(col_func).__name__} in **kwargs."
)
funcs.append(col_func)
if not columns:
raise TypeError("Must provide 'func' or named aggregation **kwargs.")
return columns, funcs
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
engine_default = engine is None and engine_kwargs is None
# if func is None, will switch to user-provided "named aggregation" kwargs
if func_is_none := func is None:
columns, func = self._validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, dict) and engine_default:
raise SpecificationError("nested renamer is not supported")
elif is_list_like(func) and engine_default:
from .dataframe import DataFrame
result = DataFrame(
query_compiler=self._query_compiler.groupby_agg(
by=self._by,
agg_func=func,
axis=self._axis,
groupby_kwargs=self._kwargs,
agg_args=args,
agg_kwargs=kwargs,
)
)
# query compiler always gives result a multiindex on the axis with the
# function names, but series always gets a regular index on the columns
# because there is no need to identify which original column's aggregation
# the new column represents. alternatively we could give the query compiler
# a hint that it's for a series, not a dataframe.
if func_is_none:
return result.set_axis(labels=columns, axis=1, copy=False)
return result.set_axis(
labels=self._try_get_str_func(func), axis=1, copy=False
)
else:
return super().aggregate(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
agg = aggregate
| SeriesGroupBy |
python | PrefectHQ__prefect | src/prefect/_experimental/bundles/__init__.py | {
"start": 1206,
"end": 18675
} | class ____(TypedDict):
"""
A serialized bundle is a serialized function, context, and flow run that can be
easily transported for later execution.
"""
function: str
context: str
flow_run: dict[str, Any]
dependencies: str
def _serialize_bundle_object(obj: Any) -> str:
"""
Serializes an object to a string.
"""
return base64.b64encode(gzip.compress(cloudpickle.dumps(obj))).decode() # pyright: ignore[reportUnknownMemberType]
def _deserialize_bundle_object(serialized_obj: str) -> Any:
"""
Deserializes an object from a string.
"""
return cloudpickle.loads(gzip.decompress(base64.b64decode(serialized_obj)))
def _is_local_module(module_name: str, module_path: str | None = None) -> bool:
"""
Check if a module is a local module (not from standard library or site-packages).
Args:
module_name: The name of the module.
module_path: Optional path to the module file.
Returns:
True if the module is a local module, False otherwise.
"""
# Skip modules that are known to be problematic or not needed
skip_modules = {
"__pycache__",
# Skip test modules
"unittest",
"pytest",
"test_",
"_pytest",
# Skip prefect modules - they'll be available on remote
"prefect",
}
# Check module name prefixes
for skip in skip_modules:
if module_name.startswith(skip):
return False
# Check if it's a built-in module
if module_name in sys.builtin_module_names:
return False
# Check if it's in the standard library (Python 3.10+)
if hasattr(sys, "stdlib_module_names"):
# Check both full module name and base module name
base_module = module_name.split(".")[0]
if (
module_name in sys.stdlib_module_names
or base_module in sys.stdlib_module_names
):
return False
# If we have the module path, check if it's in site-packages or dist-packages
if module_path:
path_str = str(module_path)
# Also exclude standard library paths
if (
"site-packages" in path_str
or "dist-packages" in path_str
or "/lib/python" in path_str
or "/.venv/" in path_str
):
return False
else:
# Try to import the module to get its path
try:
module = importlib.import_module(module_name)
if hasattr(module, "__file__") and module.__file__:
path_str = str(module.__file__)
if (
"site-packages" in path_str
or "dist-packages" in path_str
or "/lib/python" in path_str
or "/.venv/" in path_str
):
return False
except (ImportError, AttributeError):
# If we can't import it, it's probably not a real module
return False
# Only consider it local if it exists and we can verify it
return True
def _extract_imports_from_source(source_code: str) -> set[str]:
"""
Extract all import statements from Python source code.
Args:
source_code: The Python source code to analyze.
Returns:
A set of imported module names.
"""
imports: set[str] = set()
try:
tree = ast.parse(source_code)
except SyntaxError:
logger.debug("Failed to parse source code for import extraction")
return imports
for node in ast.walk(tree):
if isinstance(node, ast.Import):
for alias in node.names:
imports.add(alias.name)
elif isinstance(node, ast.ImportFrom):
if node.module:
imports.add(node.module)
# Don't add individual imported items as they might be classes/functions
# Only track the module itself
return imports
def _discover_local_dependencies(
flow: Flow[Any, Any], visited: set[str] | None = None
) -> set[str]:
"""
Recursively discover local module dependencies of a flow.
Args:
flow: The flow to analyze.
visited: Set of already visited modules to avoid infinite recursion.
Returns:
A set of local module names that should be serialized by value.
"""
if visited is None:
visited = set()
local_modules: set[str] = set()
# Get the module containing the flow
try:
flow_module = inspect.getmodule(flow.fn)
except (AttributeError, TypeError):
# Flow function doesn't have a module (e.g., defined in REPL)
return local_modules
if not flow_module:
return local_modules
module_name = flow_module.__name__
# Process the flow's module and all its dependencies recursively
_process_module_dependencies(flow_module, module_name, local_modules, visited)
return local_modules
def _process_module_dependencies(
module: ModuleType,
module_name: str,
local_modules: set[str],
visited: set[str],
) -> None:
"""
Recursively process a module and discover its local dependencies.
Args:
module: The module to process.
module_name: The name of the module.
local_modules: Set to accumulate discovered local modules.
visited: Set of already visited modules to avoid infinite recursion.
"""
# Skip if we've already processed this module
if module_name in visited:
return
visited.add(module_name)
# Check if this is a local module
module_file = getattr(module, "__file__", None)
if not module_file or not _is_local_module(module_name, module_file):
return
local_modules.add(module_name)
# Get the source code of the module
try:
source_code = inspect.getsource(module)
except (OSError, TypeError):
# Can't get source for this module
return
imports = _extract_imports_from_source(source_code)
# Check each import to see if it's local and recursively process it
for import_name in imports:
# Skip if already visited
if import_name in visited:
continue
# Try to resolve the import
imported_module = None
try:
# Handle relative imports by resolving them
if module_name and "." in module_name:
package = ".".join(module_name.split(".")[:-1])
try:
imported_module = importlib.import_module(import_name, package)
except ImportError:
imported_module = importlib.import_module(import_name)
else:
imported_module = importlib.import_module(import_name)
except (ImportError, AttributeError):
# Can't import, skip it
continue
# Recursively process this imported module
_process_module_dependencies(
imported_module, import_name, local_modules, visited
)
@contextmanager
def _pickle_local_modules_by_value(flow: Flow[Any, Any]):
"""
Context manager that registers local modules for pickle-by-value serialization.
Args:
flow: The flow whose dependencies should be registered.
"""
registered_modules: list[ModuleType] = []
try:
# Discover local dependencies
local_modules = _discover_local_dependencies(flow)
logger.debug("Local modules: %s", local_modules)
if local_modules:
logger.debug(
"Registering local modules for pickle-by-value serialization: %s",
", ".join(local_modules),
)
# Register each local module for pickle-by-value
for module_name in local_modules:
try:
module = importlib.import_module(module_name)
cloudpickle.register_pickle_by_value(module) # pyright: ignore[reportUnknownMemberType] Missing stubs
registered_modules.append(module)
except (ImportError, AttributeError) as e:
logger.debug(
"Failed to register module %s for pickle-by-value: %s",
module_name,
e,
)
yield
finally:
# Unregister all modules we registered
for module in registered_modules:
try:
cloudpickle.unregister_pickle_by_value(module) # pyright: ignore[reportUnknownMemberType] Missing stubs
except Exception as e:
logger.debug(
"Failed to unregister module %s from pickle-by-value: %s",
getattr(module, "__name__", module),
e,
)
def create_bundle_for_flow_run(
flow: Flow[Any, Any],
flow_run: FlowRun,
context: dict[str, Any] | None = None,
) -> SerializedBundle:
"""
Creates a bundle for a flow run.
Args:
flow: The flow to bundle.
flow_run: The flow run to bundle.
context: The context to use when running the flow.
Returns:
A serialized bundle.
"""
context = context or serialize_context()
dependencies = (
subprocess.check_output(
[
_get_uv_path(),
"pip",
"freeze",
# Exclude editable installs because we won't be able to install them in the execution environment
"--exclude-editable",
]
)
.decode()
.strip()
)
# Remove dependencies installed from a local file path because we won't be able
# to install them in the execution environment. The user will be responsible for
# making sure they are available in the execution environment
filtered_dependencies: list[str] = []
file_dependencies: list[str] = []
for line in dependencies.split("\n"):
if "file://" in line:
file_dependencies.append(line)
else:
filtered_dependencies.append(line)
dependencies = "\n".join(filtered_dependencies)
if file_dependencies:
logger.warning(
"The following dependencies were installed from a local file path and will not be "
"automatically installed in the execution environment: %s. If these dependencies "
"are not available in the execution environment, your flow run may fail.",
"\n".join(file_dependencies),
)
# Automatically register local modules for pickle-by-value serialization
with _pickle_local_modules_by_value(flow):
return {
"function": _serialize_bundle_object(flow),
"context": _serialize_bundle_object(context),
"flow_run": flow_run.model_dump(mode="json"),
"dependencies": dependencies,
}
def extract_flow_from_bundle(bundle: SerializedBundle) -> Flow[Any, Any]:
"""
Extracts a flow from a bundle.
"""
return _deserialize_bundle_object(bundle["function"])
def _extract_and_run_flow(
bundle: SerializedBundle,
cwd: Path | str | None = None,
env: dict[str, Any] | None = None,
) -> None:
"""
Extracts a flow from a bundle and runs it.
Designed to be run in a subprocess.
Args:
bundle: The bundle to extract and run.
cwd: The working directory to use when running the flow.
env: The environment to use when running the flow.
"""
os.environ.update(env or {})
# TODO: make this a thing we can pass directly to the engine
os.environ["PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS"] = "false"
settings_context = get_settings_context()
flow = _deserialize_bundle_object(bundle["function"])
context = _deserialize_bundle_object(bundle["context"])
flow_run = FlowRun.model_validate(bundle["flow_run"])
if cwd:
os.chdir(cwd)
with SettingsContext(
profile=settings_context.profile,
settings=Settings(),
):
with handle_engine_signals(flow_run.id):
maybe_coro = run_flow(
flow=flow,
flow_run=flow_run,
context=context,
)
if asyncio.iscoroutine(maybe_coro):
# This is running in a brand new process, so there won't be an existing
# event loop.
asyncio.run(maybe_coro)
def execute_bundle_in_subprocess(
bundle: SerializedBundle,
env: dict[str, Any] | None = None,
cwd: Path | str | None = None,
) -> multiprocessing.context.SpawnProcess:
"""
Executes a bundle in a subprocess.
Args:
bundle: The bundle to execute.
Returns:
A multiprocessing.context.SpawnProcess.
"""
ctx = multiprocessing.get_context("spawn")
env = env or {}
# Install dependencies if necessary
if dependencies := bundle.get("dependencies"):
subprocess.check_call(
[_get_uv_path(), "pip", "install", *dependencies.split("\n")],
# Copy the current environment to ensure we install into the correct venv
env=os.environ,
)
process = ctx.Process(
target=_extract_and_run_flow,
kwargs={
"bundle": bundle,
"env": get_current_settings().to_environment_variables(exclude_unset=True)
| os.environ
| env,
"cwd": cwd,
},
)
process.start()
return process
def convert_step_to_command(
step: dict[str, Any], key: str, quiet: bool = False
) -> list[str]:
"""
Converts a bundle upload or execution step to a command.
Args:
step: The step to convert.
key: The key to use for the remote file when downloading or uploading.
quiet: Whether to suppress `uv` output from the command.
Returns:
A list of strings representing the command to run the step.
"""
# Start with uv run
command = ["uv", "run"]
if quiet:
command.append("--quiet")
step_keys = list(step.keys())
if len(step_keys) != 1:
raise ValueError("Expected exactly one function in step")
function_fqn = step_keys[0]
function_args = step[function_fqn]
# Add the `--with` argument to handle dependencies for running the step
requires: list[str] | str = function_args.get("requires", [])
if isinstance(requires, str):
requires = [requires]
if requires:
command.extend(["--with", ",".join(requires)])
# Add the `--python` argument to handle the Python version for running the step
python_version = sys.version_info
command.extend(["--python", f"{python_version.major}.{python_version.minor}"])
# Add the `-m` argument to defined the function to run
command.extend(["-m", function_fqn])
# Add any arguments with values defined in the step
for arg_name, arg_value in function_args.items():
if arg_name == "requires":
continue
command.extend([f"--{slugify(arg_name)}", arg_value])
# Add the `--key` argument to specify the remote file name
command.extend(["--key", key])
return command
def upload_bundle_to_storage(
bundle: SerializedBundle, key: str, upload_command: list[str]
) -> None:
"""
Uploads a bundle to storage.
Args:
bundle: The serialized bundle to upload.
key: The key to use for the remote file when uploading.
upload_command: The command to use to upload the bundle as a list of strings.
"""
# Write the bundle to a temporary directory so it can be uploaded to the bundle storage
# via the upload command
with tempfile.TemporaryDirectory() as temp_dir:
Path(temp_dir).joinpath(key).write_bytes(json.dumps(bundle).encode("utf-8"))
try:
full_command = upload_command + [key]
logger.debug("Uploading execution bundle with command: %s", full_command)
subprocess.check_call(
full_command,
cwd=temp_dir,
)
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr.decode("utf-8")) from e
async def aupload_bundle_to_storage(
bundle: SerializedBundle, key: str, upload_command: list[str]
) -> None:
"""
Asynchronously uploads a bundle to storage.
Args:
bundle: The serialized bundle to upload.
key: The key to use for the remote file when uploading.
upload_command: The command to use to upload the bundle as a list of strings.
"""
# Write the bundle to a temporary directory so it can be uploaded to the bundle storage
# via the upload command
with tempfile.TemporaryDirectory() as temp_dir:
await (
anyio.Path(temp_dir)
.joinpath(key)
.write_bytes(json.dumps(bundle).encode("utf-8"))
)
try:
full_command = upload_command + [key]
logger.debug("Uploading execution bundle with command: %s", full_command)
await anyio.run_process(
full_command,
cwd=temp_dir,
)
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr.decode("utf-8")) from e
__all__ = [
"execute_bundle_from_file",
"convert_step_to_command",
"create_bundle_for_flow_run",
"extract_flow_from_bundle",
"execute_bundle_in_subprocess",
"SerializedBundle",
]
| SerializedBundle |
python | django__django | django/forms/widgets.py | {
"start": 1437,
"end": 2556
} | class ____:
element_template = "{path}"
def __init__(self, path, **attributes):
self._path = path
self.attributes = attributes
def __eq__(self, other):
# Compare the path only, to ensure performant comparison in
# Media.merge.
return (self.__class__ is other.__class__ and self.path == other.path) or (
isinstance(other, str) and self._path == other
)
def __hash__(self):
# Hash the path only, to ensure performant comparison in Media.merge.
return hash(self._path)
def __str__(self):
return format_html(
self.element_template,
path=self.path,
attributes=flatatt(self.attributes),
)
def __repr__(self):
return f"{type(self).__qualname__}({self._path!r})"
@property
def path(self):
"""
Ensure an absolute path.
Relative paths are resolved via the {% static %} template tag.
"""
if self._path.startswith(("http://", "https://", "/")):
return self._path
return static(self._path)
| MediaAsset |
python | PrefectHQ__prefect | src/prefect/settings/sources.py | {
"start": 2634,
"end": 3997
} | class ____(DotEnvSettingsSource):
def __init__(
self,
settings_cls: type[BaseSettings],
env_file: Optional[DotenvType] = ENV_FILE_SENTINEL,
env_file_encoding: Optional[str] = None,
case_sensitive: Optional[bool] = None,
env_prefix: Optional[str] = None,
env_nested_delimiter: Optional[str] = None,
env_ignore_empty: Optional[bool] = None,
env_parse_none_str: Optional[str] = None,
env_parse_enums: Optional[bool] = None,
env_blacklist: Optional[List[str]] = None,
) -> None:
super().__init__(
settings_cls,
env_file,
env_file_encoding,
case_sensitive,
env_prefix,
env_nested_delimiter,
env_ignore_empty,
env_parse_none_str,
env_parse_enums,
)
self.env_blacklist = env_blacklist
if self.env_blacklist:
if isinstance(self.env_vars, dict):
for key in self.env_blacklist:
self.env_vars.pop(key, None)
else:
self.env_vars: dict[str, str | None] = {
key: value
for key, value in self.env_vars.items() # type: ignore
if key.lower() not in env_blacklist
}
| FilteredDotEnvSettingsSource |
python | boto__boto3 | tests/unit/docs/test_collection.py | {
"start": 660,
"end": 5894
} | class ____(BaseDocsTest):
def test_document_collections(self):
collection_documenter = CollectionDocumenter(
self.resource, self.root_services_path
)
collection_documenter.document_collections(self.doc_structure)
self.assert_contains_lines_in_order(
[
'-----------\nCollections\n-----------',
'Collections provide an interface to iterate over and ',
'manipulate groups of resources. ',
'For more information about collections refer to the ',
]
)
self.assert_contains_lines_in_order(
[
'samples',
'.. py:attribute:: MyService.ServiceResource.samples',
' A collection of Sample resources.'
'A Sample Collection will include all resources by default, '
'and extreme caution should be taken when performing actions '
'on all resources.',
' .. py:method:: all()',
(
' Creates an iterable of all Sample resources in the '
'collection.'
),
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.all()',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
' .. py:method:: filter(**kwargs)',
(
' Creates an iterable of all Sample resources in '
'the collection filtered by kwargs passed to method. '
'A Sample collection will include all resources by default '
'if no filters are provided, and extreme caution should be '
'taken when performing actions on all resources'
),
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.filter(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
' .. py:method:: limit(**kwargs)',
(
' Creates an iterable up to a specified amount of '
'Sample resources in the collection.'
),
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.limit(',
' count=123',
' )',
' :type count: integer',
(
' :param count: The limit to the number of resources '
'in the iterable.'
),
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
' .. py:method:: operate(**kwargs)',
' **Request Syntax**',
' response = myservice.samples.operate(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: dict',
' :returns: ',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string',",
" 'Bar': 'string'",
' }',
' **Response Structure**',
' - *(dict) --* ',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
' .. py:method:: page_size(**kwargs)',
(
' Creates an iterable of all Sample resources in the '
'collection, but limits the number of items returned by '
'each service call by the specified amount.'
),
' **Request Syntax**',
' ::',
'',
' sample_iterator = myservice.samples.page_size(',
' count=123',
' )',
' :type count: integer',
(
' :param count: The number of items returned by '
'each service call'
),
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
' ',
],
self.get_nested_service_contents(
'myservice', 'service-resource', 'samples'
),
)
| TestCollectionDocumenter |
python | keon__algorithms | tests/test_tree.py | {
"start": 1417,
"end": 3072
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
import random
random.seed(18719)
cls.random = random
cls.range = 10000
def setUp(self):
self.keys_to_insert = [self.random.randrange(-self.range, self.range)
for i in range(self.range)]
def test_insertion_and_find_even_degree(self):
btree = BTree(4)
for i in self.keys_to_insert:
btree.insert_key(i)
for i in range(100):
key = self.random.choice(self.keys_to_insert)
self.assertTrue(btree.find(key))
def test_insertion_and_find_odd_degree(self):
btree = BTree(3)
for i in self.keys_to_insert:
btree.insert_key(i)
for i in range(100):
key = self.random.choice(self.keys_to_insert)
self.assertTrue(btree.find(key))
def test_deletion_even_degree(self):
btree = BTree(4)
key_list = set(self.keys_to_insert)
for i in key_list:
btree.insert_key(i)
for key in key_list:
btree.remove_key(key)
self.assertFalse(btree.find(key))
self.assertEqual(btree.root.keys, [])
self.assertEqual(btree.root.children, [])
def test_deletion_odd_degree(self):
btree = BTree(3)
key_list = set(self.keys_to_insert)
for i in key_list:
btree.insert_key(i)
for key in key_list:
btree.remove_key(key)
self.assertFalse(btree.find(key))
self.assertEqual(btree.root.keys, [])
self.assertEqual(btree.root.children, [])
| TestBTree |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 50731,
"end": 51474
} | class ____(Operation):
def call(self, x):
return backend.numpy.bitwise_invert(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.bitwise_invert", "keras.ops.numpy.bitwise_invert"])
def bitwise_invert(x):
"""Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of the
integers in the input arrays. This ufunc implements the C/Python operator
`~`.
Args:
x: Input integer tensor.
Returns:
Result tensor.
"""
if any_symbolic_tensors((x,)):
return BitwiseInvert().symbolic_call(x)
return backend.numpy.bitwise_invert(x)
| BitwiseInvert |
python | huggingface__transformers | src/transformers/models/nemotron/modeling_nemotron.py | {
"start": 28701,
"end": 39562
} | class ____(NemotronPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`NemotronDecoderLayer`]
Args:
config: NemotronConfig
"""
def __init__(self, config: NemotronConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[NemotronDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps)
self.rotary_emb = NemotronRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> BaseModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
# embed positions
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
# TODO: re-enable check: Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron
| NemotronModel |
python | getsentry__sentry | tests/sentry/testutils/thread_leaks/test_assertion.py | {
"start": 264,
"end": 3015
} | class ____:
def test_no_leaks_passes_cleanly(self) -> None:
"""Test that clean code passes without issues."""
with assert_none():
pass # No threads created
# Should not raise
def test_thread_leak_strict_mode_raises(self) -> None:
"""Test that thread leaks raise in strict mode."""
stop = Event()
thread = Thread(target=stop.wait, daemon=True)
try:
with pytest.raises(ThreadLeakAssertionError) as exc_info:
with assert_none():
# Create a daemon thread that won't block test completion
thread.start()
finally:
stop.set()
thread.join()
stack_diff = str(exc_info.value)
log_test_info(f"ORIG: {stack_diff}")
# all of the numbers are effectively random
stack_diff = re.sub("[0-9]+", "$N", stack_diff)
assert "\n <_MainThread(MainThread, started $N)>@None\n" in stack_diff
# Remove context lines, containing thread leaks from other tests
stack_diff = re.sub("^ .*\n", "", stack_diff, flags=re.MULTILINE)
# Verify the error message contains useful debugging info
assert (
stack_diff
== """
+ <Thread(Thread-$N (wait), started daemon $N)>@threading.Event.wait
+ File "./tests/sentry/testutils/thread_leaks/test_assertion.py", line $N, in test_thread_leak_strict_mode_raises
+ thread = Thread(target=stop.wait, daemon=True)
+ \n""" # note: that's a load-bearing whitespace!
)
def test_thread_that_exits_during_context_passes(self) -> None:
"""Test that threads which complete and exit don't trigger assertion error."""
with assert_none():
# Create and start a thread that will complete quickly
thread = Thread(target=lambda: None, daemon=True)
thread.start()
# Wait for thread to complete before context exits
thread.join(timeout=1.0)
# Should not raise - thread completed and is no longer active
def test_thread_leak_always_raises(self) -> None:
"""Test that thread leaks always raise an error (no more non-strict mode)."""
stop = Event()
thread = Thread(target=stop.wait, daemon=True)
try:
with pytest.raises(ThreadLeakAssertionError) as exc_info:
with assert_none():
# Create a daemon thread leak
thread.start()
finally:
stop.set()
thread.join()
# Verify the exception contains the leaked thread
assert hasattr(exc_info.value, "thread_leaks")
assert thread in exc_info.value.thread_leaks
| TestAssertNoneIntegration |
python | dask__distributed | distributed/diagnostics/progress.py | {
"start": 1065,
"end": 4360
} | class ____(SchedulerPlugin):
"""Tracks progress of a set of keys or futures
On creation we provide a set of keys or futures that interest us as well as
a scheduler. We traverse through the scheduler's dependencies to find all
relevant keys on which our keys depend. We then plug into the scheduler to
learn when our keys become available in memory at which point we record
their completion.
State
-----
keys: set
Set of keys that are not yet computed
all_keys: set
Set of all keys that we track
This class performs no visualization. However it is used by other classes,
notably TextProgressBar and ProgressWidget, which do perform visualization.
"""
def __init__(self, keys, scheduler, minimum=0, dt=0.1, complete=False, name=None):
self.name = name or f"progress-{tokenize(keys, minimum, dt, complete)}"
self.keys = {k.key if hasattr(k, "key") else k for k in keys}
self.keys = {k for k in self.keys}
self.scheduler = scheduler
self.complete = complete
self._minimum = minimum
self._dt = dt
self.last_duration = 0
self._start_time = default_timer()
self._running = False
self.status = None
self.extra = {}
async def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.tasks):
await asyncio.sleep(0.05)
tasks = [self.scheduler.tasks[k] for k in keys]
self.keys = None
self.scheduler.add_plugin(self) # subtle race condition here
self.all_keys, errors = dependent_keys(tasks, complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(tasks, complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
logger.debug("Set up Progress keys")
for k in errors:
self.transition(
k, None, "erred", stimulus_id="progress-setup", exception=True
)
def transition(self, key, start, finish, *args, **kwargs):
if key in self.keys and start == "processing" and finish == "memory":
logger.debug("Progress sees key %s", key)
self.keys.remove(key)
if not self.keys:
self.stop()
if key in self.all_keys and finish == "erred":
logger.debug("Progress sees task erred")
self.stop(exception=kwargs["exception"], key=key)
if key in self.keys and finish == "forgotten":
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True, key=key)
def restart(self, scheduler):
self.stop()
def stop(self, exception=None, key=None):
if self.name in self.scheduler.plugins:
self.scheduler.remove_plugin(name=self.name)
if exception:
self.status = "error"
self.extra.update(
{"exception": self.scheduler.tasks[key].exception, "key": key}
)
else:
self.status = "finished"
logger.debug("Remove Progress plugin")
| Progress |
python | weaviate__weaviate-python-client | weaviate/collections/classes/data.py | {
"start": 1605,
"end": 1924
} | class ____(_DataReference):
"""This class represents a reference between objects within a collection to be used when batching."""
target_collection: str
def _to_beacons(self) -> List[str]:
return [f"{BEACON}{self.target_collection}/{uuid}" for uuid in self._to_uuids()]
@dataclass
| DataReferenceMulti |
python | huggingface__transformers | src/transformers/models/deepseek_vl/modular_deepseek_vl.py | {
"start": 4390,
"end": 4474
} | class ____(IdeficsBaseModelOutputWithPast):
pass
| DeepseekVLBaseModelOutputWithPast |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 520815,
"end": 521685
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "users")
field = sgqlc.types.Field(
sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field"
)
users = sgqlc.types.Field(
"UserConnection",
graphql_name="users",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| ProjectV2ItemFieldUserValue |
python | huggingface__transformers | src/transformers/models/diffllama/modular_diffllama.py | {
"start": 19461,
"end": 19518
} | class ____(GemmaForCausalLM):
pass
| DiffLlamaForCausalLM |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/transfers/s3_to_wasb.py | {
"start": 1618,
"end": 1901
} | class ____(Exception):
"""Custom exception raised when neither a blob_prefix or blob_name are passed to the operator."""
def __init__(self):
message: str = "One of blob_name or blob_prefix must be provided."
super().__init__(message)
| InvalidAzureBlobParameters |
python | agronholm__apscheduler | src/apscheduler/_decorators.py | {
"start": 505,
"end": 2905
} | class ____(TaskDefaults):
id: str | UnsetValue = attrs.field(default=unset)
job_executor: str | UnsetValue = attrs.field(
validator=if_not_unset(instance_of(str)), default=unset
)
max_running_jobs: int | None | UnsetValue = attrs.field(
validator=if_not_unset(optional(instance_of(int))), default=unset
)
misfire_grace_time: timedelta | None | UnsetValue = attrs.field(
converter=as_timedelta,
validator=if_not_unset(optional(instance_of(timedelta))),
default=unset,
)
metadata: MetadataType | UnsetValue = attrs.field(
validator=if_not_unset(valid_metadata), default=unset
)
def task(
id: str | UnsetValue = unset,
*,
job_executor: str | UnsetValue = unset,
max_running_jobs: int | None | UnsetValue = unset,
misfire_grace_time: int | timedelta | None | UnsetValue = unset,
metadata: MetadataType | UnsetValue = unset,
) -> Callable[[T], T]:
"""
Decorate a function to have implied defaults as an APScheduler task.
:param id: the task ID to use
:param str job_executor: name of the job executor that will run the task
:param int | None max_running_jobs: maximum number of instances of the task that are
allowed to run concurrently
:param ~datetime.timedelta | None misfire_grace_time: maximum number of seconds the
run time of jobs created for the task are allowed to be late, compared to the
scheduled run time
:param metadata: key-value pairs for storing JSON compatible custom information
"""
def wrapper(func: T) -> T:
if not isinstance(func, Callable):
raise ValueError("only functions can be decorated with @task")
if hasattr(func, TASK_PARAMETERS_KEY):
raise ValueError(
"this function already has APScheduler task parameters set"
)
setattr(
func,
TASK_PARAMETERS_KEY,
TaskParameters(
id=id,
job_executor=job_executor,
max_running_jobs=max_running_jobs,
misfire_grace_time=misfire_grace_time,
metadata=metadata,
),
)
return func
return wrapper
def get_task_params(func: Callable[..., Any]) -> TaskParameters:
return getattr(func, TASK_PARAMETERS_KEY, None) or TaskParameters()
| TaskParameters |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks_test.py | {
"start": 4994,
"end": 7909
} | class ____(test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
h = basic_session_run_hooks.StopAtStepHook(last_step=10)
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
no_op = control_flow_ops.no_op()
h.begin()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(state_ops.assign(global_step, 5))
h.after_create_session(sess, None)
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 9))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 10))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 11))
mon_sess._should_stop = False
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
def test_stop_based_on_num_step(self):
h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
no_op = control_flow_ops.no_op()
h.begin()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(state_ops.assign(global_step, 5))
h.after_create_session(sess, None)
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 13))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 14))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 15))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 16))
mon_sess._should_stop = False
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
def test_stop_based_with_multiple_steps(self):
h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
no_op = control_flow_ops.no_op()
h.begin()
with session_lib.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(state_ops.assign(global_step, 5))
h.after_create_session(sess, None)
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(state_ops.assign(global_step, 15))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
| StopAtStepTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 15812,
"end": 17096
} | class ____(DefaultErrorHandler):
"""
Custom error handler for handling export errors specific to Mixpanel streams.
This handler addresses:
- 400 status code with "to_date cannot be later than today" message, indicating a potential timezone mismatch.
- ConnectionResetError during response parsing, indicating a need to retry the request.
If the response does not match these specific cases, the handler defers to the parent class's implementation.
"""
def interpret_response(self, response_or_exception: Optional[Union[requests.Response, Exception]] = None) -> ErrorResolution:
if isinstance(response_or_exception, requests.Response):
try:
# trying to parse response to avoid ConnectionResetError and retry if it occurs
iter_dicts(response_or_exception.iter_lines(decode_unicode=True))
except ConnectionResetError:
return ErrorResolution(
response_action=ResponseAction.RETRY,
failure_type=FailureType.transient_error,
error_message=f"Response status code: {response_or_exception.status_code}. Retrying...",
)
return super().interpret_response(response_or_exception)
| ExportErrorHandler |
python | getsentry__sentry | tests/sentry/hybridcloud/services/test_region_organization_provisioning.py | {
"start": 1178,
"end": 9886
} | class ____(TestCase):
def get_provisioning_args(
self, user: User, is_test: bool = False, create_default_team: bool = True
) -> OrganizationProvisioningOptions:
return OrganizationProvisioningOptions(
provision_options=OrganizationOptions(
name="Santry",
slug="santry",
owning_user_id=user.id,
is_test=is_test,
create_default_team=create_default_team,
),
post_provision_options=PostProvisionOptions(),
)
def organization_matches_provisioning_args(
self, organization_id: int, provisioning_options: OrganizationProvisioningOptions
) -> None:
with assume_test_silo_mode(SiloMode.REGION):
org: Organization = Organization.objects.get(id=organization_id)
assert org.slug == provisioning_options.provision_options.slug
assert org.name == provisioning_options.provision_options.name
assert (
org.get_default_owner().id == provisioning_options.provision_options.owning_user_id
)
assert org.is_test == provisioning_options.provision_options.is_test
def assert_has_default_team_and_membership(self, organization_id: int, user_id: int) -> None:
with assume_test_silo_mode(SiloMode.REGION):
org_membership = OrganizationMember.objects.get(
organization_id=organization_id, user_id=user_id
)
team = Team.objects.get(organization_id=organization_id)
OrganizationMemberTeam.objects.get(
team_id=team.id, organizationmember_id=org_membership.id
)
def test_provisions_when_no_conflicting_orgs(self) -> None:
user = self.create_user()
provision_options = self.get_provisioning_args(user)
organization_id = 42
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert result
self.organization_matches_provisioning_args(
organization_id=organization_id, provisioning_options=provision_options
)
self.assert_has_default_team_and_membership(organization_id, user.id)
def test_provisions_test_org_without_default_team(self) -> None:
user = self.create_user()
provision_options = self.get_provisioning_args(user, create_default_team=False)
organization_id = 42
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert result
self.organization_matches_provisioning_args(
organization_id=organization_id, provisioning_options=provision_options
)
with assume_test_silo_mode(SiloMode.REGION):
assert not Team.objects.filter(organization_id=organization_id).exists()
def test_provisions_when_fully_conflicting_org_has_matching_owner(self) -> None:
user = self.create_user()
organization_id = 42
existing_org = self.create_organization(
id=organization_id, slug="santry", name="Santry", owner=user
)
assert existing_org.id == organization_id
provision_options = self.get_provisioning_args(user, create_default_team=False)
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert result
self.organization_matches_provisioning_args(
organization_id=organization_id, provisioning_options=provision_options
)
def test_does_not_provision_and_returns_false_when_multiple_orgs_conflict(self) -> None:
organization_id = 42
# Org with a matching id
self.create_organization(
id=organization_id, slug="newsantry", name="NewSantry", owner=self.create_user()
)
# Org with a matching slug
self.create_organization(slug="santry", name="Santry", owner=self.create_user())
provisioning_user = self.create_user()
provision_options = self.get_provisioning_args(provisioning_user, create_default_team=False)
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert not result
with assume_test_silo_mode(SiloMode.REGION):
# Ensure that the user has not been added to any orgs since provisioning failed
provisioning_user_memberships = OrganizationMember.objects.filter(
user_id=provisioning_user.id
)
assert not provisioning_user_memberships.exists()
def test_truncates_name_when_too_long(self) -> None:
user = self.create_user()
provision_options = self.get_provisioning_args(user)
provision_options.provision_options.name = "a" * 128
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=42, provision_payload=provision_options, region_name="us"
)
assert result is True
with assume_test_silo_mode(SiloMode.REGION):
org: Organization = Organization.objects.get(id=42)
assert org.name == "a" * 64
def test_does_not_provision_and_returns_false_when_conflicting_org_with_different_owner(
self,
) -> None:
organization_id = 42
self.create_organization(
id=organization_id, slug="santry", name="Santry", owner=self.create_user()
)
provisioning_user = self.create_user()
provision_options = self.get_provisioning_args(provisioning_user)
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert not result
with assume_test_silo_mode(SiloMode.REGION):
provisioning_user_memberships = OrganizationMember.objects.filter(
user_id=provisioning_user.id
)
assert not provisioning_user_memberships.exists()
def test_does_not_provision_when_organization_id_already_in_use(
self,
) -> None:
organization_id = 42
user = self.create_user()
self.create_organization(
id=organization_id, slug="something-different", name="Santry", owner=user
)
provision_options = self.get_provisioning_args(user)
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert not result
with assume_test_silo_mode(SiloMode.REGION):
assert not Organization.objects.filter(
slug=provision_options.provision_options.slug
).exists()
def test_does_not_provision_when_organization_slug_already_in_use(
self,
) -> None:
organization_id = 42
user = self.create_user()
self.create_organization(slug="santry", name="Santry", owner=user)
provision_options = self.get_provisioning_args(user)
result = region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id, provision_payload=provision_options, region_name="us"
)
assert not result
with assume_test_silo_mode(SiloMode.REGION):
assert not Organization.objects.filter(id=organization_id).exists()
def test_streamline_only_is_true(self) -> None:
"""
All new organizations should never see the legacy UI.
"""
user = self.create_user()
provision_options = self.get_provisioning_args(user)
organization_id = 42
region_organization_provisioning_rpc_service.create_organization_in_region(
organization_id=organization_id,
provision_payload=provision_options,
region_name="us",
)
with assume_test_silo_mode(SiloMode.REGION):
org: Organization = Organization.objects.get(id=organization_id)
assert OrganizationOption.objects.get_value(org, "sentry:streamline_ui_only")
@control_silo_test(regions=create_test_regions("us"))
| TestRegionOrganizationProvisioningCreateInRegion |
python | coleifer__peewee | peewee.py | {
"start": 246474,
"end": 247247
} | class ____(BaseModelSelect, CompoundSelectQuery):
def __init__(self, model, *args, **kwargs):
self.model = model
super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs)
def _get_model_cursor_wrapper(self, cursor):
return self.lhs._get_model_cursor_wrapper(cursor)
def _normalize_model_select(fields_or_models):
fields = []
for fm in fields_or_models:
if is_model(fm):
fields.extend(fm._meta.sorted_fields)
elif isinstance(fm, ModelAlias):
fields.extend(fm.get_field_aliases())
elif isinstance(fm, Table) and fm._columns:
fields.extend([getattr(fm, col) for col in fm._columns])
else:
fields.append(fm)
return fields
| ModelCompoundSelectQuery |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/hooks/databricks.py | {
"start": 3374,
"end": 5273
} | class ____:
"""Utility class for the run state concept of Databricks runs."""
RUN_LIFE_CYCLE_STATES = [
"PENDING",
"RUNNING",
"TERMINATING",
"TERMINATED",
"SKIPPED",
"INTERNAL_ERROR",
"QUEUED",
]
def __init__(
self, life_cycle_state: str, result_state: str = "", state_message: str = "", *args, **kwargs
) -> None:
if life_cycle_state not in self.RUN_LIFE_CYCLE_STATES:
raise AirflowException(
f"Unexpected life cycle state: {life_cycle_state}: If the state has "
"been introduced recently, please check the Databricks user "
"guide for troubleshooting information"
)
self.life_cycle_state = life_cycle_state
self.result_state = result_state
self.state_message = state_message
@property
def is_terminal(self) -> bool:
"""True if the current state is a terminal state."""
return self.life_cycle_state in ("TERMINATED", "SKIPPED", "INTERNAL_ERROR")
@property
def is_successful(self) -> bool:
"""True if the result state is SUCCESS."""
return self.result_state == "SUCCESS"
def __eq__(self, other: object) -> bool:
if not isinstance(other, RunState):
return NotImplemented
return (
self.life_cycle_state == other.life_cycle_state
and self.result_state == other.result_state
and self.state_message == other.state_message
)
def __hash__(self):
return hash((self.life_cycle_state, self.result_state, self.state_message))
def __repr__(self) -> str:
return str(self.__dict__)
def to_json(self) -> str:
return json.dumps(self.__dict__)
@classmethod
def from_json(cls, data: str) -> RunState:
return RunState(**json.loads(data))
| RunState |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/deprecated4.py | {
"start": 193,
"end": 992
} | class ____:
@property
@deprecated("Deprecated v1 getter")
def v1(self) -> str:
return ""
@v1.setter
def v1(self, value: str) -> None: ...
@v1.deleter
def v1(self) -> None: ...
@property
def v2(self) -> str:
return ""
@deprecated("Deprecated v2 setter")
@v2.setter
def v2(self, value: str) -> None: ...
@v2.deleter
@deprecated("Deprecated v2 deleter")
def v2(self) -> None: ...
a = A()
# This should generate an error if reportDeprecated is enabled.
v1 = a.v1
a.v1 = ""
del a.v1
v2 = a.v2
# This should generate an error if reportDeprecated is enabled.
a.v2 = ""
# This should generate an error if reportDeprecated is enabled.
a.v2 += ""
# This should generate an error if reportDeprecated is enabled.
del a.v2
| A |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 55035,
"end": 58307
} | class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.cross_attention = EdgeTamVideoPerceiverAttention(config)
self.mlp = EdgeTamVideoPerceiverMLP(config)
self.dropout = nn.Dropout(config.perceiver_resampler_hidden_dropout)
self.self_attention = EdgeTamVideoPerceiverAttention(config)
self.self_mlp = EdgeTamVideoPerceiverMLP(config)
# Layer norms moved from attention classes to here
self.layer_norm_input = nn.LayerNorm(config.perceiver_resampler_hidden_size)
self.layer_norm_latents = nn.LayerNorm(config.perceiver_resampler_hidden_size)
self.layer_norm_self = nn.LayerNorm(config.perceiver_resampler_hidden_size)
def forward(
self,
latents: torch.Tensor,
input_features: torch.Tensor,
positional_encoding: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# Cross attention with layer norms
normalized_latents = self.layer_norm_latents(latents)
normalized_input = self.layer_norm_input(input_features)
cross_attention_output = self.cross_attention(
query=normalized_latents,
key=normalized_input,
value=normalized_input,
positional_encoding=positional_encoding,
)
latents = latents + self.dropout(cross_attention_output)
mlp_output = self.mlp(latents)
latents = latents + mlp_output
# Self attention with layer norm
normalized_latents_self = self.layer_norm_self(latents)
self_attention_output = self.self_attention(
query=normalized_latents_self, key=normalized_latents_self, value=normalized_latents_self
)
latents = latents + self_attention_output
self_mlp_output = self.self_mlp(latents)
latents = latents + self_mlp_output
return latents
def window_partition(hidden_state, window_size):
"""
Partition into non-overlapping windows with padding if needed.
Args:
hidden_state (`torch.Tensor`):
Input tokens with [batch_size, height, width, num_channels].
window_size (`int`):
Window size.
Returns:
`tuple(torch.FloatTensor)` comprising various elements:
- windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
- (padded_height, padded_width): padded height and width before partition
"""
batch_size, height, width, num_channels = hidden_state.shape
pad_height = (window_size - height % window_size) % window_size
pad_width = (window_size - width % window_size) % window_size
# Noop in case pad_width == 0 and pad_height == 0.
hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
padded_height, padded_width = height + pad_height, width + pad_width
hidden_state = hidden_state.view(
batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels
)
windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows, (padded_height, padded_width)
| EdgeTamVideoPerceiverEncoderLayer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 76567,
"end": 77072
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("repository_id", "head_sha", "client_mutation_id")
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
head_sha = sgqlc.types.Field(
sgqlc.types.non_null(GitObjectID), graphql_name="headSha"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| CreateCheckSuiteInput |
python | django__django | tests/postgres_tests/models.py | {
"start": 5995,
"end": 6311
} | class ____(PostgreSQLModel):
room = models.ForeignKey("Room", on_delete=models.CASCADE)
datespan = DateRangeField()
start = models.DateTimeField()
end = models.DateTimeField()
cancelled = models.BooleanField(default=False)
requirements = models.JSONField(blank=True, null=True)
| HotelReservation |
python | kamyu104__LeetCode-Solutions | Python/count-pairs-in-two-arrays.py | {
"start": 33,
"end": 572
} | class ____(object):
def countPairs(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
for i in xrange(len(nums1)):
nums1[i] -= nums2[i]
nums1.sort()
result = 0
left, right = 0, len(nums1)-1
while left < right:
if nums1[left] > 0 or -nums1[left] < nums1[right]:
result += right-left
right -= 1
else:
left += 1
return result
| Solution |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_bulk_operations.py | {
"start": 295,
"end": 1015
} | class ____(TestCase):
def setUp(self):
class _BookResource(resources.ModelResource):
class Meta:
model = Book
use_bulk = True
self.resource = _BookResource()
rows = [(i + 1, "book_name") for i in range(10)]
self.dataset = tablib.Dataset(*rows, headers=["id", "name"])
def init_update_test_data(self, model=Book):
[model.objects.create(name="book_name") for _ in range(10)]
self.assertEqual(10, model.objects.count())
rows = model.objects.all().values_list("id", "name")
updated_rows = [(r[0], "UPDATED") for r in rows]
self.dataset = tablib.Dataset(*updated_rows, headers=["id", "name"])
| BulkTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataplex.py | {
"start": 2351,
"end": 2544
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataplex Tasks link."""
name = "Dataplex Tasks"
key = "tasks_conf"
format_str = DATAPLEX_TASKS_LINK
| DataplexTasksLink |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 827465,
"end": 828005
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("can_create_repositories", "visibility")
can_create_repositories = sgqlc.types.Field(
Boolean, graphql_name="canCreateRepositories"
)
visibility = sgqlc.types.Field(
OrgUpdateMemberRepositoryCreationPermissionAuditEntryVisibility,
graphql_name="visibility",
)
| OrgUpdateMemberRepositoryCreationPermissionAuditEntry |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 104488,
"end": 108706
} | class ____:
def test_datacopied(self):
from scipy.linalg._decomp import _datacopied
M = matrix([[0, 1], [2, 3]])
A = asarray(M)
L = M.tolist()
M2 = M.copy()
class Fake1:
def __array__(self, dtype=None, copy=None):
return A
class Fake2:
__array_interface__ = A.__array_interface__
F1 = Fake1()
F2 = Fake2()
for item, status in [(M, False), (A, False), (L, True),
(M2, False), (F1, False), (F2, False)]:
arr = asarray(item)
assert_equal(_datacopied(arr, item), status,
err_msg=repr(item))
def test_aligned_mem_float():
"""Check linalg works with non-aligned memory (float32)"""
# Allocate 402 bytes of memory (allocated on boundary)
a = arange(402, dtype=np.uint8)
# Create an array with boundary offset 4
z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
z = z.reshape((10, 10))
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
@pytest.mark.skipif(platform.machine() == 'ppc64le',
reason="crashes on ppc64le")
def test_aligned_mem():
"""Check linalg works with non-aligned memory (float64)"""
# Allocate 804 bytes of memory (allocated on boundary)
a = arange(804, dtype=np.uint8)
# Create an array with boundary offset 4
z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
z = z.reshape((10, 10))
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
def test_aligned_mem_complex():
"""Check that complex objects don't need to be completely aligned"""
# Allocate 1608 bytes of memory (allocated on boundary)
a = zeros(1608, dtype=np.uint8)
# Create an array with boundary offset 8
z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
z = z.reshape((10, 10))
eig(z, overwrite_a=True)
# This does not need special handling
eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
args = list(args)
for i in range(len(args)):
a = args[:]
if isinstance(a[i], np.ndarray):
# Try misaligning a[i]
aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)
aa = np.frombuffer(aa.data, offset=4, count=a[i].size,
dtype=a[i].dtype)
aa = aa.reshape(a[i].shape)
aa[...] = a[i]
a[i] = aa
func(*a, **kwargs)
if len(a[i].shape) > 1:
a[i] = a[i].T
func(*a, **kwargs)
@pytest.mark.xfail(run=False,
reason="Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
M = np.eye(10, dtype=float)
R = np.arange(100).reshape((10, 10))
S = np.arange(20000, dtype=np.uint8)
S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
S = S.reshape((10, 10))
b = np.ones(10)
LU, piv = lu_factor(S)
for (func, args, kwargs) in [
(eig, (S,), dict(overwrite_a=True)), # crash
(eigvals, (S,), dict(overwrite_a=True)), # no crash
(lu, (S,), dict(overwrite_a=True)), # no crash
(lu_factor, (S,), dict(overwrite_a=True)), # no crash
(lu_solve, ((LU, piv), b), dict(overwrite_b=True)),
(solve, (S, b), dict(overwrite_a=True, overwrite_b=True)),
(svd, (M,), dict(overwrite_a=True)), # no crash
(svd, (R,), dict(overwrite_a=True)), # no crash
(svd, (S,), dict(overwrite_a=True)), # crash
(svdvals, (S,), dict()), # no crash
(svdvals, (S,), dict(overwrite_a=True)), # crash
(cholesky, (M,), dict(overwrite_a=True)), # no crash
(qr, (S,), dict(overwrite_a=True)), # crash
(rq, (S,), dict(overwrite_a=True)), # crash
(hessenberg, (S,), dict(overwrite_a=True)), # crash
(schur, (S,), dict(overwrite_a=True)), # crash
]:
check_lapack_misaligned(func, args, kwargs)
# not properly tested
# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
| TestDatacopied |
python | hynek__structlog | tests/test_output.py | {
"start": 4312,
"end": 4972
} | class ____:
def test_does_not_cache(self):
"""
Due to doctest weirdness, we must not reuse PrintLoggers.
"""
f = PrintLoggerFactory()
assert f() is not f()
def test_passes_file(self):
"""
If a file is passed to the factory, it get passed on to the logger.
"""
pl = PrintLoggerFactory(stderr)()
assert stderr is pl._file
def test_ignores_args(self):
"""
PrintLogger doesn't take positional arguments. If any are passed to
the factory, they are not passed to the logger.
"""
PrintLoggerFactory()(1, 2, 3)
| TestPrintLoggerFactory |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/external-systems/apis/use_minimal_resource_in_asset.py | {
"start": 78,
"end": 1120
} | class ____(dg.ConfigurableResource):
@property
def query_string(self) -> str:
latittude = "37.615223"
longitude = "-122.389977"
time_zone = "America/Los_Angeles"
return f"https://api.sunrise-sunset.org/json?lat={latittude}&lng={longitude}&date=today&tzid={time_zone}"
def sunrise(self) -> str:
data = requests.get(self.query_string, timeout=5).json()
return data["results"]["sunrise"]
@dg.asset
# highlight-start
# Provide the resource to the asset
def sfo_sunrise(context: dg.AssetExecutionContext, sun_resource: SunResource) -> None:
# highlight-end
sunrise = sun_resource.sunrise()
context.log.info(f"Sunrise in San Francisco is at {sunrise}.")
# end_use_minimal_resource_in_asset
# start_use_minimal_resource_in_asset_defs
# highlight-start
# Include the resource in the Definitions object
@dg.definitions
def resources():
return dg.Definitions(resources={"sun_resource": SunResource()})
# highlight-end
# end_use_minimal_resource_in_asset_defs
| SunResource |
python | huggingface__transformers | src/transformers/models/pixtral/image_processing_pixtral.py | {
"start": 1450,
"end": 5379
} | class ____(ImagesKwargs, total=False):
"""
patch_size (`Union[dict[str, int], int]` *optional*, defaults to `{"height": 16, "width": 16}`):
Size of the patches in the model, used to calculate the output image size. Can be overridden by `patch_size` in the `preprocess` method.
"""
patch_size: Union[dict[str, int], int]
# Adapted from function in image_transforms.py to ensure any transparent pixels are converted to white.
def convert_to_rgb(image: ImageInput) -> ImageInput:
"""
Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
as is.
Args:
image (Image):
The image to convert.
"""
requires_backends(convert_to_rgb, ["vision"])
if not isinstance(image, PIL.Image.Image):
return image
if image.mode == "RGB":
return image
# First we convert to RGBA to set background to white.
image = image.convert("RGBA")
# Create a new image with a white background.
new_image = PIL.Image.new("RGBA", image.size, "WHITE")
new_image.paste(image, (0, 0), image)
new_image = new_image.convert("RGB")
return new_image
def _num_image_tokens(image_size: tuple[int, int], patch_size: tuple[int, int]) -> int:
"""
Calculate the number of image tokens given the image size and patch size.
Args:
image_size (`tuple[int, int]`):
The size of the image as `(height, width)`.
patch_size (`tuple[int, int]`):
The patch size as `(height, width)`.
Returns:
`int`: The number of image tokens.
"""
height, width = image_size
patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)
num_width_tokens = (width - 1) // patch_width + 1
num_height_tokens = (height - 1) // patch_height + 1
return num_height_tokens, num_width_tokens
def get_resize_output_image_size(
input_image: ImageInput,
size: Union[int, tuple[int, int], list[int], tuple[int]],
patch_size: Union[int, tuple[int, int], list[int], tuple[int]],
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple:
"""
Find the target (height, width) dimension of the output image after resizing given the input image and the desired
size.
Args:
input_image (`ImageInput`):
The image to resize.
size (`int` or `tuple[int, int]`):
Max image size an input image can be. Must be a dictionary with the key "longest_edge".
patch_size (`int` or `tuple[int, int]`):
The patch_size as `(height, width)` to use for resizing the image. If patch_size is an integer, `(patch_size, patch_size)`
will be used
input_data_format (`ChannelDimension`, *optional*):
The channel dimension format of the input image. If unset, will use the inferred format from the input.
Returns:
`tuple`: The target (height, width) dimension of the output image after resizing.
"""
max_height, max_width = size if isinstance(size, (tuple, list)) else (size, size)
patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)
height, width = get_image_size(input_image, input_data_format)
ratio = max(height / max_height, width / max_width)
if ratio > 1:
# Original implementation uses `round` which utilises bankers rounding, which can lead to surprising results
# Here we use floor to ensure the image is always smaller than the given "longest_edge"
height = int(math.floor(height / ratio))
width = int(math.floor(width / ratio))
num_height_tokens, num_width_tokens = _num_image_tokens((height, width), (patch_height, patch_width))
return num_height_tokens * patch_height, num_width_tokens * patch_width
| PixtralImageProcessorKwargs |
python | fluentpython__example-code-2e | 07-1class-func/bingocall.py | {
"start": 173,
"end": 552
} | class ____:
def __init__(self, items):
self._items = list(items) # <1>
random.shuffle(self._items) # <2>
def pick(self): # <3>
try:
return self._items.pop()
except IndexError:
raise LookupError('pick from empty BingoCage') # <4>
def __call__(self): # <5>
return self.pick()
# end::BINGO[]
| BingoCage |
python | tiangolo__fastapi | docs_src/body/tutorial003_py310.py | {
"start": 61,
"end": 324
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
app = FastAPI()
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
return {"item_id": item_id, **item.dict()}
| Item |
python | EpistasisLab__tpot | tpot/search_spaces/nodes/estimator_node.py | {
"start": 4629,
"end": 5027
} | class ____(SearchSpace):
def __init__(self, method, space, hyperparameter_parser=default_hyperparameter_parser):
self.method = method
self.space = space
self.hyperparameter_parser = hyperparameter_parser
def generate(self, rng=None):
return EstimatorNodeIndividual(self.method, self.space, hyperparameter_parser=self.hyperparameter_parser, rng=rng) | EstimatorNode |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/settings.py | {
"start": 881,
"end": 2051
} | class ____(PrefectBaseSettings):
model_config = build_settings_config(("integrations", "kubernetes", "observer"))
enabled: bool = Field(
default=True,
description="Whether the Kubernetes observer is enabled to watch for Prefect-submitted Kubernetes pod and job events.",
)
replicate_pod_events: bool = Field(
default=True,
description="Whether the Kubernetes observer should replicate Prefect-submitted Kubernetes pod events, which can be used for Prefect Automations.",
)
namespaces: Namespaces = Field(
default_factory=set,
description="The namespaces to watch for Prefect-submitted Kubernetes "
"jobs and pods. If not provided, the watch will be cluster-wide.",
)
additional_label_filters: LabelFilters = Field(
default_factory=dict,
description="Additional label filters to apply to the watch for "
"Prefect-submitted Kubernetes jobs and pods. If not provided, the watch will "
"include all pods and jobs with the `prefect.io/flow-run-id` label. Labels "
"should be provided in the format `key=value`.",
)
| KubernetesObserverSettings |
python | tensorflow__tensorflow | tensorflow/compiler/tests/adam_test.py | {
"start": 1665,
"end": 7958
} | class ____(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types | self.complex_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
for dtype in self.float_types | self.complex_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
for dtype in self.float_types | self.complex_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
else:
update2.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()
| AdamOptimizerTest |
python | pallets__jinja | src/jinja2/ext.py | {
"start": 1386,
"end": 8147
} | class ____:
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
name as includes the name of the extension (fragment cache).
"""
identifier: t.ClassVar[str]
def __init_subclass__(cls) -> None:
cls.identifier = f"{cls.__module__}.{cls.__name__}"
#: if this extension parses this is the list of tags it's listening to.
tags: set[str] = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment: Environment) -> None:
self.environment = environment
def bind(self, environment: Environment) -> "te.Self":
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(
self, source: str, name: str | None, filename: str | None = None
) -> str:
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(
self, stream: "TokenStream"
) -> t.Union["TokenStream", t.Iterable["Token"]]:
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
"""
return stream
def parse(self, parser: "Parser") -> nodes.Node | list[nodes.Node]:
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name: str, lineno: int | None = None) -> nodes.ExtensionAttribute:
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(
self,
name: str,
args: list[nodes.Expr] | None = None,
kwargs: list[nodes.Keyword] | None = None,
dyn_args: nodes.Expr | None = None,
dyn_kwargs: nodes.Expr | None = None,
lineno: int | None = None,
) -> nodes.Call:
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(
self.attr(name, lineno=lineno),
args,
kwargs,
dyn_args,
dyn_kwargs,
lineno=lineno,
)
@pass_context
def _gettext_alias(
__context: Context, *args: t.Any, **kwargs: t.Any
) -> t.Any | Undefined:
return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
@pass_context
def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, even if there are no
# variables. This makes translation strings more consistent
# and predictable. This requires escaping
return rv % variables # type: ignore
return gettext
def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
@pass_context
def ngettext(
__context: Context,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return ngettext
def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
@pass_context
def pgettext(
__context: Context, __string_ctx: str, __string: str, **variables: t.Any
) -> str:
variables.setdefault("context", __string_ctx)
rv = __context.call(func, __string_ctx, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return pgettext
def _make_new_npgettext(
func: t.Callable[[str, str, str, int], str],
) -> t.Callable[..., str]:
@pass_context
def npgettext(
__context: Context,
__string_ctx: str,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
variables.setdefault("context", __string_ctx)
variables.setdefault("num", __num)
rv = __context.call(func, __string_ctx, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return npgettext
| Extension |
python | FactoryBoy__factory_boy | factory/base.py | {
"start": 21782,
"end": 22292
} | class ____(BaseFactory[T], metaclass=FactoryMetaClass):
"""Factory base with build and create support.
This class has the ability to support multiple ORMs by using custom creation
functions.
"""
# Backwards compatibility
AssociatedClassError: Type[Exception]
class Meta(BaseMeta):
pass
# Add the association after metaclass execution.
# Otherwise, AssociatedClassError would be detected as a declaration.
Factory.AssociatedClassError = errors.AssociatedClassError
| Factory |
python | google__jax | jax/_src/layout.py | {
"start": 908,
"end": 969
} | class ____:
def __repr__(self):
return "AUTO"
| AutoLayout |
python | pandas-dev__pandas | pandas/tests/dtypes/test_missing.py | {
"start": 25762,
"end": 27994
} | class ____:
@pytest.mark.parametrize("func", [libmissing.checknull, isna])
@pytest.mark.parametrize(
"value",
na_vals + sometimes_na_vals, # type: ignore[operator]
)
def test_checknull_na_vals(self, func, value):
assert func(value)
@pytest.mark.parametrize("func", [libmissing.checknull, isna])
@pytest.mark.parametrize("value", inf_vals)
def test_checknull_inf_vals(self, func, value):
assert not func(value)
@pytest.mark.parametrize("func", [libmissing.checknull, isna])
@pytest.mark.parametrize("value", int_na_vals)
def test_checknull_intna_vals(self, func, value):
assert not func(value)
@pytest.mark.parametrize("func", [libmissing.checknull, isna])
@pytest.mark.parametrize("value", never_na_vals)
def test_checknull_never_na_vals(self, func, value):
assert not func(value)
@pytest.mark.parametrize(
"value",
na_vals + sometimes_na_vals, # type: ignore[operator]
)
def test_checknull_old_na_vals(self, value):
assert libmissing.checknull(value)
@pytest.mark.parametrize("value", int_na_vals)
def test_checknull_old_intna_vals(self, value):
assert not libmissing.checknull(value)
def test_is_matching_na(self, nulls_fixture, nulls_fixture2):
left = nulls_fixture
right = nulls_fixture2
assert libmissing.is_matching_na(left, left)
if left is right:
assert libmissing.is_matching_na(left, right)
elif is_float(left) and is_float(right):
# np.nan vs float("NaN") we consider as matching
assert libmissing.is_matching_na(left, right)
elif type(left) is type(right):
# e.g. both Decimal("NaN")
assert libmissing.is_matching_na(left, right)
else:
assert not libmissing.is_matching_na(left, right)
def test_is_matching_na_nan_matches_none(self):
assert not libmissing.is_matching_na(None, np.nan)
assert not libmissing.is_matching_na(np.nan, None)
assert libmissing.is_matching_na(None, np.nan, nan_matches_none=True)
assert libmissing.is_matching_na(np.nan, None, nan_matches_none=True)
| TestLibMissing |
python | fastapi__sqlmodel | docs_src/tutorial/one/tutorial007.py | {
"start": 100,
"end": 1636
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.id == 1)
results = session.exec(statement)
hero = results.first()
print("Hero:", hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | yaml__pyyaml | lib/yaml/scanner.py | {
"start": 593,
"end": 906
} | class ____:
# See below simple keys treatment.
def __init__(self, token_number, required, index, line, column, mark):
self.token_number = token_number
self.required = required
self.index = index
self.line = line
self.column = column
self.mark = mark
| SimpleKey |
python | scipy__scipy | scipy/integrate/_ode.py | {
"start": 40275,
"end": 43117
} | class ____(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
supports_solout = True
messages = {1: 'computation successful',
2: 'computation successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nsteps is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
__class_getitem__ = None
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.verbosity = verbosity
self.success = 1
self.set_solout(None)
def set_solout(self, solout, complex=False):
self.solout = solout
self.solout_cmplx = complex
if solout is None:
self.iout = 0
else:
self.iout = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
self.iwork = zeros((21,), dtype=np.int32)
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork,
self.nsteps, self.verbosity]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, istate = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
self.istate = istate
if istate < 0:
unexpected_istate_msg = f'Unexpected istate={istate:d}'
warnings.warn(f'{self.__class__.__name__:s}: '
f'{self.messages.get(istate, unexpected_istate_msg):s}',
stacklevel=2)
self.success = 0
return y, x
def _solout(self, x, y):
if self.solout is not None:
if self.solout_cmplx:
y = y[::2] + 1j * y[1::2]
return self.solout(x, y)
else:
return 1
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
| dopri5 |
python | pyca__cryptography | src/cryptography/hazmat/_oid.py | {
"start": 10382,
"end": 10527
} | class ____:
CA_ISSUERS = ObjectIdentifier("1.3.6.1.5.5.7.48.2")
OCSP = ObjectIdentifier("1.3.6.1.5.5.7.48.1")
| AuthorityInformationAccessOID |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 84796,
"end": 92480
} | class ____(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`Mask2FormerMaskedAttentionDecoderLayer`]. The decoder updates the query embeddings through multiple cross
(masked) and self-attention layers. The decoder uses a new **masked attention** mechanism instead of the standard
cross-attention, which extracts localized features by constraining cross-attention to within the foreground region
of the predicted mask for each query, instead of attending to the full feature map.
Args:
config (`Mask2FormerConfig`):
Configuration used to instantiate Mask2FormerMaskedAttentionDecoder.
"""
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.config = config
self.mask_feature_size = config.mask_feature_size
self.dropout = config.dropout
self.layerdrop = config.dropout
self.num_feature_levels = 3 # level embedding (3 scales)
self.decoder_layers = config.decoder_layers - 1
self.layers = nn.ModuleList(
[Mask2FormerMaskedAttentionDecoderLayer(self.config) for _ in range(self.decoder_layers)]
)
self.layernorm = nn.LayerNorm(config.hidden_dim)
self.mask_predictor = Mask2FormerMaskPredictor(
hidden_size=config.hidden_dim,
num_heads=config.num_attention_heads,
mask_feature_size=self.mask_feature_size,
)
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds: Optional[torch.Tensor] = None,
multi_stage_positional_embeddings: Optional[torch.Tensor] = None,
pixel_embeddings: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
query_position_embeddings: Optional[torch.Tensor] = None,
feature_size_list: Optional[list] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(num_queries, batch_size, hidden_size)`):
The query embeddings that are passed into the decoder.
multi_stage_positional_embeddings (`torch.FloatTensor` of shape `(height*width, batch_size, num_channels)`):
Position embeddings that are added to the keys in each cross(masked)-attention layer.
pixel_embeddings (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel
Decoder.
query_position_embeddings (`torch.FloatTensor` of shape `(num_queries, batch_size, hidden_size)`):
, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross(masked)-attention of the decoder.
feature_size_list (`list[torch.Size]`):
This is a list containing shapes (height & width) of multi-scale features from the Pixel Decoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
# intermediate hidden states with layernorm applied - required for predicting class logits
intermediate = ()
# decoder layers
all_hidden_states = () if output_hidden_states else None
attentions = () if output_attentions else None
# intermediate mask predictions from transformer decoder layers
intermediate_mask_predictions = ()
intermediate_hidden_states = self.layernorm(inputs_embeds)
intermediate += (intermediate_hidden_states,)
predicted_mask, attention_mask = self.mask_predictor(
intermediate_hidden_states, pixel_embeddings, feature_size_list[0]
)
intermediate_mask_predictions += (predicted_mask,)
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = torch.rand([])
if self.training and (dropout_probability < self.layerdrop):
continue
level_index = idx % self.num_feature_levels
where = (attention_mask.sum(-1) != attention_mask.shape[-1]).to(attention_mask.dtype)
# Multiply the attention mask instead of indexing to avoid issue in torch.export.
attention_mask = attention_mask * where.unsqueeze(-1)
layer_outputs = decoder_layer(
hidden_states,
level_index,
None, # attention_mask
multi_stage_positional_embeddings,
query_position_embeddings,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=attention_mask,
output_attentions=output_attentions,
)
intermediate_hidden_states = self.layernorm(layer_outputs[0])
predicted_mask, attention_mask = self.mask_predictor(
intermediate_hidden_states,
pixel_embeddings,
feature_size_list[(idx + 1) % self.num_feature_levels],
)
intermediate_mask_predictions += (predicted_mask,)
# add intermediate hidden states with layer norm applied which will be used for predicting class logits
intermediate += (intermediate_hidden_states,)
hidden_states = layer_outputs[0]
if output_attentions:
attentions += (layer_outputs[1],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
hidden_states = hidden_states.transpose(1, 0)
if not return_dict:
outputs = [hidden_states, all_hidden_states, attentions, intermediate, intermediate_mask_predictions]
return tuple(v for v in outputs if v is not None)
return Mask2FormerMaskedAttentionDecoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=attentions,
intermediate_hidden_states=intermediate,
masks_queries_logits=intermediate_mask_predictions,
)
# Copied from transformers.models.maskformer.modeling_maskformer.PredictionBlock with MaskFormer->Mask2Former
| Mask2FormerMaskedAttentionDecoder |
python | django__django | tests/template_tests/test_library.py | {
"start": 2142,
"end": 3273
} | class ____(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_simple_tag(self):
@self.library.simple_tag
def func():
return ""
self.assertIn("func", self.library.tags)
def test_simple_tag_parens(self):
@self.library.simple_tag()
def func():
return ""
self.assertIn("func", self.library.tags)
def test_simple_tag_name_kwarg(self):
@self.library.simple_tag(name="name")
def func():
return ""
self.assertIn("name", self.library.tags)
def test_simple_tag_invalid(self):
msg = "Invalid arguments provided to simple_tag"
with self.assertRaisesMessage(ValueError, msg):
self.library.simple_tag("invalid")
def test_simple_tag_wrapped(self):
@self.library.simple_tag
@functools.lru_cache(maxsize=32)
def func():
return ""
func_wrapped = self.library.tags["func"].__wrapped__
self.assertIs(func_wrapped, func)
self.assertTrue(hasattr(func_wrapped, "cache_info"))
| SimpleTagRegistrationTests |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_model_config.py | {
"start": 356,
"end": 487
} | class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
return model_input
| TestModel |
python | django__django | django/contrib/admin/migrations/0003_logentry_add_action_flag_choices.py | {
"start": 43,
"end": 538
} | class ____(migrations.Migration):
dependencies = [
("admin", "0002_logentry_remove_auto_add"),
]
# No database changes; adds choices to action_flag.
operations = [
migrations.AlterField(
model_name="logentry",
name="action_flag",
field=models.PositiveSmallIntegerField(
choices=[(1, "Addition"), (2, "Change"), (3, "Deletion")],
verbose_name="action flag",
),
),
]
| Migration |
python | pytorch__pytorch | torch/fx/passes/graph_manipulation.py | {
"start": 1466,
"end": 3965
} | class ____(NamedTuple):
output_size: int
total_size: int
@compatibility(is_backward_compatible=False)
def get_size_of_all_nodes(
fx_module: GraphModule, args: Optional[list[torch.Tensor]] = None
) -> None:
"""Given a fx graph module, update each node with its total size (weights + bias + output)
and its output_size(output). For a non-module node, the total size is the output size.
return total size"""
if args is not None:
# Mark shape and dtype for each node (node.shape and node.dtype)
ShapeProp(fx_module).propagate(*args)
# Calculate the total size of the whole fx graph
for node in fx_module.graph.nodes:
if node.op == "output":
break
node.size_bytes = get_size_of_node(fx_module, node)
return
@compatibility(is_backward_compatible=False)
def get_tensor_meta(node: Node) -> Any:
tensor_meta = node.meta.get("tensor_meta")
if not tensor_meta:
raise RuntimeError(
f"Node {node} has no tensor metadata associated with it! "
f"Check that shape propagation has run."
)
return tensor_meta
@compatibility(is_backward_compatible=False)
def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
"""Given a node with node.dtype and node.shape, return its total size and its output size.
total_size = weights + bias + output_size
"""
# Total num of elements
total_num_of_elems = 0
# For a module, consider all parameters
if node.op == "call_module":
submodule_dict = dict(fx_module.named_modules())
submodule = submodule_dict[node.target]
parameters = submodule.named_parameters()
# Parameters are named tuples
for _name, p in parameters:
total_num_of_elems += p.numel()
# Don't forget the output size
# node.shape is the shape of this node's output
tensor_meta = get_tensor_meta(node)
output_elem = tensor_meta.shape.numel()
total_num_of_elems += output_elem
# Assume for now if it's quantized then it's qint8 or quint8
if tensor_meta.is_quantized:
size_per_elem_bytes = torch._empty_affine_quantized(
[], dtype=tensor_meta.dtype
).element_size()
else:
size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
total_size = size_per_elem_bytes * total_num_of_elems
output_size = size_per_elem_bytes * output_elem
return size_bytes(output_size, total_size)
| size_bytes |
python | apache__airflow | providers/jdbc/src/airflow/providers/jdbc/hooks/jdbc.py | {
"start": 1638,
"end": 10775
} | class ____(DbApiHook):
"""
General hook for JDBC access.
JDBC URL, username and password will be taken from the predefined connection.
Note that the whole JDBC URL must be specified in the "host" field in the DB.
Raises an airflow error if the given connection id doesn't exist.
To configure driver parameters, you can use the following methods:
1. Supply them as constructor arguments when instantiating the hook.
2. Set the "driver_path" and/or "driver_class" parameters in the "hook_params" dictionary when
creating the hook using SQL operators.
3. Set the "driver_path" and/or "driver_class" extra in the connection and correspondingly enable
the "allow_driver_path_in_extra" and/or "allow_driver_class_in_extra" options in the
"providers.jdbc" section of the Airflow configuration. If you're enabling these options in Airflow
configuration, you should make sure that you trust the users who can edit connections in the UI
to not use it maliciously.
4. Define the "sqlalchemy_scheme" property in the extra of the connection if you want to use the
SQLAlchemy engine from the JdbcHook. When using the JdbcHook, the "sqlalchemy_scheme" will by
default have the "jdbc" value, which is a protocol, not a database scheme or dialect. So in order
to be able to use SQLAlchemy with the JdbcHook, you need to define the "sqlalchemy_scheme"
property in the extra of the connection.
5. Patch the ``JdbcHook.default_driver_path`` and/or ``JdbcHook.default_driver_class`` values in the
``local_settings.py`` file.
See :doc:`/connections/jdbc` for full documentation.
:param args: passed to DbApiHook
:param driver_path: path to the JDBC driver jar file. See above for more info
:param driver_class: name of the JDBC driver class. See above for more info
:param kwargs: passed to DbApiHook
"""
conn_name_attr = "jdbc_conn_id"
default_conn_name = "jdbc_default"
conn_type = "jdbc"
hook_name = "JDBC Connection"
supports_autocommit = True
default_driver_path: str | None = None
default_driver_class: str | None = None
def __init__(
self,
*args,
driver_path: str | None = None,
driver_class: str | None = None,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self._driver_path = driver_path
self._driver_class = driver_class
self.lock = RLock()
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Get custom field behaviour."""
return {
"hidden_fields": ["port", "schema"],
"relabeling": {"host": "Connection URL"},
}
@property
def driver_path(self) -> str | None:
from airflow.configuration import conf
extra_driver_path = self.connection_extra_lower.get("driver_path")
if extra_driver_path:
if conf.getboolean("providers.jdbc", "allow_driver_path_in_extra", fallback=False):
self._driver_path = extra_driver_path
else:
self.log.warning(
"You have supplied 'driver_path' via connection extra but it will not be used. In order "
"to use 'driver_path' from extra you must set airflow config setting "
"`allow_driver_path_in_extra = True` in section `providers.jdbc`. Alternatively you may "
"specify it via 'driver_path' parameter of the hook constructor or via 'hook_params' "
"dictionary with key 'driver_path' if using SQL operators."
)
if not self._driver_path:
self._driver_path = self.default_driver_path
return self._driver_path
@property
def driver_class(self) -> str | None:
from airflow.configuration import conf
extra_driver_class = self.connection_extra_lower.get("driver_class")
if extra_driver_class:
if conf.getboolean("providers.jdbc", "allow_driver_class_in_extra", fallback=False):
self._driver_class = extra_driver_class
else:
self.log.warning(
"You have supplied 'driver_class' via connection extra but it will not be used. In order "
"to use 'driver_class' from extra you must set airflow config setting "
"`allow_driver_class_in_extra = True` in section `providers.jdbc`. Alternatively you may "
"specify it via 'driver_class' parameter of the hook constructor or via 'hook_params' "
"dictionary with key 'driver_class' if using SQL operators."
)
if not self._driver_class:
self._driver_class = self.default_driver_class
return self._driver_class
@property
def sqlalchemy_url(self) -> URL:
conn = self.connection
sqlalchemy_query = conn.extra_dejson.get("sqlalchemy_query", {})
if not isinstance(sqlalchemy_query, dict):
raise AirflowException("The parameter 'sqlalchemy_query' must be of type dict!")
sqlalchemy_scheme = conn.extra_dejson.get("sqlalchemy_scheme")
if sqlalchemy_scheme is None:
raise AirflowException(
"The parameter 'sqlalchemy_scheme' must be defined in extra for JDBC connections!"
)
return URL.create(
drivername=sqlalchemy_scheme,
username=conn.login,
password=conn.password,
host=conn.host,
port=conn.port,
database=conn.schema,
query=sqlalchemy_query,
)
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
engine_kwargs["creator"] = self.get_conn
return super().get_sqlalchemy_engine(engine_kwargs)
def get_conn(self) -> jaydebeapi.Connection:
conn: Connection = self.connection
host: str = cast("str", conn.host)
login: str = cast("str", conn.login)
psw: str = cast("str", conn.password)
with self.lock:
conn = jaydebeapi.connect(
jclassname=self.driver_class,
url=str(host),
driver_args=[str(login), str(psw)],
jars=self.driver_path.split(",") if self.driver_path else None,
)
return conn
def set_autocommit(self, conn: jaydebeapi.Connection, autocommit: bool) -> None:
"""
Set autocommit for the given connection.
:param conn: The connection.
:param autocommit: The connection's autocommit setting.
"""
from jpype import JException
with suppress_and_warn(jaydebeapi.Error, JException):
conn.jconn.setAutoCommit(autocommit)
def get_autocommit(self, conn: jaydebeapi.Connection) -> bool:
"""
Get autocommit setting for the provided connection.
:param conn: Connection to get autocommit setting from.
:return: connection autocommit setting. True if ``autocommit`` is set
to True on the connection. False if it is either not set, set to
False, or the connection does not support auto-commit.
"""
from jpype import JException
with suppress_and_warn(jaydebeapi.Error, JException):
return conn.jconn.getAutoCommit()
# This is reachable when the driver does not support autocommit then exceptions raised above are
# custom suppressed for jaydebeapi so we return False when control yields here.
return False # type: ignore[unreachable]
def get_uri(self) -> str:
"""Get the connection URI for the JDBC connection."""
conn = self.connection
extra = conn.extra_dejson
scheme = extra.get("sqlalchemy_scheme")
if not scheme:
return cast("str", conn.host)
driver = extra.get("sqlalchemy_driver")
uri_prefix = f"{scheme}+{driver}" if driver else scheme
auth_part = ""
if conn.login:
auth_part = quote_plus(conn.login)
if conn.password:
auth_part = f"{auth_part}:{quote_plus(conn.password)}"
auth_part = f"{auth_part}@"
host_part = conn.host or "localhost"
if conn.port:
host_part = f"{host_part}:{conn.port}"
schema_part = f"/{quote_plus(conn.schema)}" if conn.schema else ""
uri = f"{uri_prefix}://{auth_part}{host_part}{schema_part}"
sqlalchemy_query = extra.get("sqlalchemy_query", {})
if isinstance(sqlalchemy_query, dict):
query_string = urlencode({k: str(v) for k, v in sqlalchemy_query.items() if v is not None})
if query_string:
uri = f"{uri}?{query_string}"
return uri
| JdbcHook |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 2749,
"end": 2812
} | class ____:
def m(self, x: Self) -> None: ...
| Impl_ContraSelf |
python | ray-project__ray | python/ray/data/_internal/logical/operators/map_operator.py | {
"start": 15303,
"end": 16045
} | class ____(AbstractMap):
"""Logical operator for streaming repartition operation.
Args:
target_num_rows_per_block: The target number of rows per block granularity for
streaming repartition.
"""
def __init__(
self,
input_op: LogicalOperator,
target_num_rows_per_block: int,
):
super().__init__(
f"StreamingRepartition[num_rows_per_block={target_num_rows_per_block}]",
input_op,
)
self._target_num_rows_per_block = target_num_rows_per_block
@property
def target_num_rows_per_block(self) -> int:
return self._target_num_rows_per_block
def can_modify_num_rows(self) -> bool:
return False
| StreamingRepartition |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_os.py | {
"start": 383,
"end": 4154
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
"""V1PodOS - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self.discriminator = None
self.name = name
@property
def name(self):
"""Gets the name of this V1PodOS. # noqa: E501
Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null # noqa: E501
:return: The name of this V1PodOS. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1PodOS.
Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null # noqa: E501
:param name: The name of this V1PodOS. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodOS):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodOS):
return True
return self.to_dict() != other.to_dict()
| V1PodOS |
python | getsentry__sentry | src/sentry/utils/kvstore/cache.py | {
"start": 2284,
"end": 4220
} | class ____(KVStorage[str, V]):
"""
This class implements a compatibility layer for interacting with storages
that have existing data written with cache key prefixes.
"""
# XXX: ``keys`` must be ``str`` to avoid type mismatches when returning
# unwrapped values (e.g. from ``get_many``), even though the write path
# would accept ``Any`` type.
def __init__(
self,
storage: KVStorage[str, V],
prefix: str = BaseCache.prefix,
version: Any | None = None,
):
if version is None:
version = settings.CACHE_VERSION
self.storage = storage
self.prefix = prefix
self.version = version
def get(self, key: str) -> V | None:
return self.storage.get(wrap_key(self.prefix, self.version, key))
def get_many(self, keys: Sequence[str]) -> Iterator[tuple[str, V]]:
results = self.storage.get_many([wrap_key(self.prefix, self.version, key) for key in keys])
for key, value in results:
yield unwrap_key(self.prefix, self.version, key), value
def set(self, key: str, value: V, ttl: timedelta | None = None) -> None:
return self.storage.set(
wrap_key(self.prefix, self.version, key),
value,
ttl,
)
def delete(self, key: str) -> None:
self.storage.delete(wrap_key(self.prefix, self.version, key))
def delete_many(self, keys: Sequence[str]) -> None:
return self.storage.delete_many([wrap_key(self.prefix, self.version, key) for key in keys])
def bootstrap(self, automatic_expiry: bool = True) -> None:
self.storage.bootstrap()
def destroy(self) -> None:
# ``destroy`` is not implemented since the cache key prefix implies this
# is a shared keyspace, and suggests that this may cause collateral
# damage to other storage instances
raise NotImplementedError
| CacheKeyWrapper |
python | getsentry__sentry | src/sentry/search/eap/types.py | {
"start": 2626,
"end": 2746
} | class ____(EventsResponse):
confidence: ConfidenceData
page_token: NotRequired[PageToken]
@dataclass()
| EAPResponse |
python | getsentry__sentry | tests/sentry/auth/providers/test_saml2.py | {
"start": 547,
"end": 744
} | class ____(SAML2Provider):
name = "dummy"
key = "dummy_saml2"
def get_saml_setup_pipeline(self) -> list[AuthView]:
raise NotImplementedError
@control_silo_test
| DummySAML2Provider |
python | wandb__wandb | hatch_build.py | {
"start": 7516,
"end": 7854
} | class ____:
goos: str
goarch: str
def _to_goarch(arch: str) -> str:
"""Returns a valid GOARCH value or the empty string."""
return {
# amd64 synonyms
"amd64": "amd64",
"x86_64": "amd64",
# arm64 synonyms
"arm64": "arm64",
"aarch64": "arm64",
}.get(arch, "")
| TargetPlatform |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/base.py | {
"start": 1283,
"end": 16431
} | class ____(FunctionCallingLLM):
"""OCI large language models with function calling support."""
model: str = Field(description="Id of the OCI Generative AI model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
context_size: int = Field("The maximum number of tokens available for input.")
service_endpoint: Optional[str] = Field(
default=None,
description="service endpoint url.",
)
compartment_id: Optional[str] = Field(
default=None,
description="OCID of compartment.",
)
auth_type: Optional[str] = Field(
description="Authentication type, can be: API_KEY, SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL. If not specified, API_KEY will be used",
default="API_KEY",
)
auth_profile: Optional[str] = Field(
description="The name of the profile in ~/.oci/config. If not specified , DEFAULT will be used",
default="DEFAULT",
)
auth_file_location: Optional[str] = Field(
description="Path to the config file. If not specified, ~/.oci/config will be used",
default="~/.oci/config",
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the OCI Generative AI request.",
)
_client: Any = PrivateAttr()
_provider: str = PrivateAttr()
_serving_mode: str = PrivateAttr()
_completion_generator: str = PrivateAttr()
_chat_generator: str = PrivateAttr()
def __init__(
self,
model: str,
temperature: Optional[float] = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = 512,
context_size: Optional[int] = None,
service_endpoint: Optional[str] = None,
compartment_id: Optional[str] = None,
auth_type: Optional[str] = "API_KEY",
auth_profile: Optional[str] = "DEFAULT",
auth_file_location: Optional[str] = "~/.oci/config",
client: Optional[Any] = None,
provider: Optional[str] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
"""
Initializes the OCIGenAI class.
Args:
model (str): The Id of the model to be used for generating embeddings, e.g., "meta.llama-2-70b-chat".
temperature (Optional[float]): The temperature to use for sampling. Default specified in lama_index.core.constants.DEFAULT_TEMPERATURE.
max_tokens (Optional[int]): The maximum number of tokens to generate. Default is 512.
context_size (Optional[int]): The maximum number of tokens available for input. If not specified, the default context size for the model will be used.
service_endpoint (str): service endpoint url, e.g., "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
compartment_id (str): OCID of the compartment.
auth_type (Optional[str]): Authentication type, can be: API_KEY (default), SECURITY_TOKEN, INSTANCEAL, RESOURCE_PRINCIPAL. If not specified, API_KEY will be used
auth_profile (Optional[str]): The name of the profile in ~/.oci/config. If not specified , DEFAULT will be used
auth_file_location (Optional[str]): Path to the config file, If not specified, ~/.oci/config will be used.
client (Optional[Any]): An optional OCI client object. If not provided, the client will be created using the
provided service endpoint and authentifcation method.
provider (Optional[str]): Provider name of the model. If not specified, the provider will be derived from the model name.
additional_kwargs (Optional[Dict[str, Any]]): Additional kwargs for the LLM.
"""
context_size = get_context_size(model, context_size)
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
context_size=context_size,
service_endpoint=service_endpoint,
compartment_id=compartment_id,
auth_type=auth_type,
auth_profile=auth_profile,
auth_file_location=auth_file_location,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._client = client or create_client(
auth_type, auth_profile, auth_file_location, service_endpoint
)
self._provider = get_provider(model, provider)
self._serving_mode = get_serving_mode(model)
self._completion_generator = get_completion_generator()
self._chat_generator = get_chat_generator()
@classmethod
def class_name(cls) -> str:
return "OCIGenAI_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_size,
num_output=self.max_tokens,
is_chat_model=self.model in CHAT_MODELS,
model_name=self.model,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return stream_complete_fn(prompt, **kwargs)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
oci_params = self._provider.messages_to_oci_params(messages)
oci_params["is_stream"] = False
tools = kwargs.pop("tools", None)
all_kwargs = self._get_all_kwargs(**kwargs)
chat_params = {**all_kwargs, **oci_params}
if tools:
chat_params["tools"] = [
self._provider.convert_to_oci_tool(tool) for tool in tools
]
request = self._chat_generator(
compartment_id=self.compartment_id,
serving_mode=self._serving_mode,
chat_request=self._provider.oci_chat_request(**chat_params),
)
response = self._client.chat(request)
generation_info = self._provider.chat_generation_info(response)
llm_output = {
"model_id": response.data.model_id,
"model_version": response.data.model_version,
"request_id": response.request_id,
"content-length": response.headers["content-length"],
}
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=self._provider.chat_response_to_text(response),
additional_kwargs=generation_info,
),
raw=response.__dict__,
)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
oci_params = self._provider.messages_to_oci_params(messages)
oci_params["is_stream"] = True
tools = kwargs.pop("tools", None)
all_kwargs = self._get_all_kwargs(**kwargs)
chat_params = {**all_kwargs, **oci_params}
if tools:
chat_params["tools"] = [
self._provider.convert_to_oci_tool(tool) for tool in tools
]
request = self._chat_generator(
compartment_id=self.compartment_id,
serving_mode=self._serving_mode,
chat_request=self._provider.oci_chat_request(**chat_params),
)
response = self._client.chat(request)
def gen() -> ChatResponseGen:
content = ""
tool_calls_accumulated = []
for event in response.data.events():
content_delta = self._provider.chat_stream_to_text(
json.loads(event.data)
)
content += content_delta
try:
event_data = json.loads(event.data)
tool_calls_data = None
for key in ["toolCalls", "tool_calls", "functionCalls"]:
if key in event_data:
tool_calls_data = event_data[key]
break
if tool_calls_data:
new_tool_calls = _format_oci_tool_calls(tool_calls_data)
for tool_call in new_tool_calls:
existing = next(
(
t
for t in tool_calls_accumulated
if t["name"] == tool_call["name"]
),
None,
)
if existing:
existing.update(tool_call)
else:
tool_calls_accumulated.append(tool_call)
generation_info = self._provider.chat_stream_generation_info(
event_data
)
if tool_calls_accumulated:
generation_info["tool_calls"] = tool_calls_accumulated
yield ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=content,
additional_kwargs=generation_info,
),
delta=content_delta,
raw=event.__dict__,
)
except json.JSONDecodeError:
yield ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT, content=content
),
delta=content_delta,
raw=event.__dict__,
)
except Exception as e:
print(f"Error processing stream chunk: {e}")
yield ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT, content=content
),
delta=content_delta,
raw=event.__dict__,
)
return gen()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
raise NotImplementedError("Async chat is not implemented yet.")
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError("Async complete is not implemented yet.")
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise NotImplementedError("Async stream chat is not implemented yet.")
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError("Async stream complete is not implemented yet.")
# Function tooling integration methods
def _prepare_chat_with_tools(
self,
tools: Sequence["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
tool_specs = tools
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
messages = chat_history or []
if user_msg:
messages.append(user_msg)
oci_params = self._provider.messages_to_oci_params(messages)
chat_params = self._get_all_kwargs(**kwargs)
return {
"messages": messages,
"tools": tool_specs,
**({"tool_choice": "REQUIRED"} if tool_required else {}),
**oci_params,
**chat_params,
}
def _validate_chat_with_tools_response(
self,
response: ChatResponse,
tools: List["BaseTool"],
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> ChatResponse:
"""Validate the response from chat_with_tools."""
if not allow_parallel_tool_calls:
force_single_tool_call(response)
return response
def get_tool_calls_from_response(
self,
response: "ChatResponse",
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
validate_tool_call(tool_call)
argument_dict = (
json.loads(tool_call["input"])
if isinstance(tool_call["input"], str)
else tool_call["input"]
)
tool_selections.append(
ToolSelection(
tool_id=tool_call["toolUseId"],
tool_name=tool_call["name"],
tool_kwargs=argument_dict,
)
)
return tool_selections
| OCIGenAI |
python | joke2k__faker | tests/providers/test_internet.py | {
"start": 27434,
"end": 28449
} | class ____:
"""Test nl_NL internet provider methods"""
@patch(
"faker.providers.internet.Provider.user_name",
lambda x: "fabiënné",
)
def test_ascii_safe_email(self, faker):
email = faker.ascii_safe_email()
validate_email(email)
assert email.split("@")[0] == "fabienne"
@patch(
"faker.providers.internet.Provider.user_name",
lambda x: "fabiënné",
)
def test_ascii_free_email(self, faker):
email = faker.ascii_free_email()
validate_email(email)
assert email.split("@")[0] == "fabienne"
@patch(
"faker.providers.internet.Provider.user_name",
lambda x: "fabiënné",
)
def test_ascii_company_email(self, faker):
email = faker.ascii_company_email()
validate_email(email)
assert email.split("@")[0] == "fabienne"
def test_slug(self, faker):
num_of_samples = 100
for _ in range(num_of_samples):
assert faker.slug() != ""
| TestNlNl |
python | tensorflow__tensorflow | tensorflow/python/framework/smart_cond_test.py | {
"start": 3711,
"end": 5476
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTrue(self):
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
conditions = [(True, lambda: constant_op.constant(1)),
(x == 0, raise_exception)]
y = smart_cond.smart_case(conditions, default=raise_exception,
exclusive=False)
z = smart_cond.smart_case(conditions, default=raise_exception,
exclusive=True)
with session.Session() as sess:
# No feed_dict necessary
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 1)
@test_util.run_deprecated_v1
def testFalse(self):
conditions = [(False, raise_exception)]
y = smart_cond.smart_case(conditions,
default=lambda: constant_op.constant(1),
exclusive=False)
z = smart_cond.smart_case(conditions,
default=lambda: constant_op.constant(1),
exclusive=True)
with session.Session() as sess:
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 1)
@test_util.run_deprecated_v1
def testMix(self):
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
y = constant_op.constant(10)
conditions = [(x > 1, lambda: constant_op.constant(1)),
(y < 1, raise_exception),
(False, raise_exception),
(True, lambda: constant_op.constant(3))]
z = smart_cond.smart_case(conditions, default=raise_exception)
with session.Session() as sess:
self.assertEqual(sess.run(z, feed_dict={x: 2}), 1)
self.assertEqual(sess.run(z, feed_dict={x: 0}), 3)
| SmartCaseTest |
python | pytorch__pytorch | test/dynamo/test_base_hop.py | {
"start": 7008,
"end": 8991
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3]", L_y_: "f32[3, 3]"):
l_x_ = L_x_
l_y_ = L_y_
subgraph_0 = self.subgraph_0
invoke_quant_test = torch.ops.higher_order.invoke_quant_test(subgraph_0, l_x_, l_y_, scheme = 'nf4'); subgraph_0 = l_x_ = l_y_ = None
getitem: "f32[3, 3]" = invoke_quant_test[0]
getitem_1: "f32[3, 3]" = invoke_quant_test[1]
getitem_2: "f32[3, 3]" = invoke_quant_test[2]
getitem_3: "f32[3, 3]" = invoke_quant_test[3]; invoke_quant_test = None
return (getitem, getitem_1, getitem_2, getitem_3)
class subgraph_0(torch.nn.Module):
def forward(self, l_x_: "f32[3, 3]", l_y_: "f32[3, 3]"):
add_: "f32[3, 3]" = l_x_.add_(1); add_ = None
matmul: "f32[3, 3]" = l_x_ @ l_y_
sin: "f32[3, 3]" = matmul.sin(); matmul = None
cos: "f32[3, 3]" = sin.cos(); sin = None
add: "f32[3, 3]" = l_x_ + l_y_
sub: "f32[3, 3]" = l_x_ - l_y_
matmul_1: "f32[3, 3]" = l_x_ @ l_y_; l_x_ = l_y_ = None
return (cos, add, sub, matmul_1)
""", # noqa: B950
)
self.assertExpectedInline(
str(find_hop_schema(bk.graphs[0], invoke_quant_test)[0]),
"""invoke_quant_test(Any subgraph, Tensor(a1!) arg0, Tensor arg1, *, str scheme="nf4") -> (Tensor, Tensor, Tensor, Tensor)""", # noqa: B950
)
def test_none_input(self):
def inner(x, y):
if x is not None:
return y.sin()
return y.cos()
backend = EagerAndRecordGraphs()
@torch.compile(backend=backend, fullgraph=True)
def f(x, y):
return invoke_quant_test(inner, x, y, scheme="nf4")
x = None
y = torch.randn(3, 4)
out = f(x, y)
self.assertEqual(out, inner(x, y))
self.assertExpectedInline(
normalize_graph(backend.graphs[0]),
"""\
| GraphModule |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/dagster_dbt_translator.py | {
"start": 2541,
"end": 27962
} | class ____:
"""Holds a set of methods that derive Dagster asset definition metadata given a representation
of a dbt resource (models, tests, sources, etc).
This class is exposed so that methods can be overriden to customize how Dagster asset metadata
is derived.
"""
def __init__(self, settings: Optional[DagsterDbtTranslatorSettings] = None):
"""Initialize the translator.
Args:
settings (Optional[DagsterDbtTranslatorSettings]): Settings for the translator.
"""
self._settings = settings or DagsterDbtTranslatorSettings()
@property
def settings(self) -> DagsterDbtTranslatorSettings:
if not hasattr(self, "_settings"):
self._settings = DagsterDbtTranslatorSettings()
return self._settings
def get_resource_props(self, manifest: Mapping[str, Any], unique_id: str) -> Mapping[str, Any]:
"""Given a parsed manifest and a dbt unique_id, returns the dictionary of properties
for the corresponding dbt resource (e.g. model, seed, snapshot, source) as defined
in your dbt project. This can be used as a convenience method when overriding the
`get_asset_spec` method.
Args:
manifest (Mapping[str, Any]): The parsed manifest of the dbt project.
unique_id (str): The unique_id of the dbt resource.
Returns:
Mapping[str, Any]: The dictionary of properties for the corresponding dbt resource.
Examples:
.. code-block:: python
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_asset_spec(self, manifest: Mapping[str, Any], unique_id: str, project: Optional[DbtProject]) -> dg.AssetSpec:
base_spec = super().get_asset_spec(manifest, unique_id, project)
resource_props = self.get_resource_props(manifest, unique_id)
if resource_props["meta"].get("use_custom_group"):
return base_spec.replace_attributes(group_name="custom_group")
else:
return base_spec
"""
return get_node(manifest, unique_id)
def get_asset_spec(
self,
manifest: Mapping[str, Any],
unique_id: str,
project: Optional["DbtProject"],
) -> AssetSpec:
"""Returns an AssetSpec representing a specific dbt resource."""
# memoize resolution for a given manifest & unique_id
# since we recursively call get_asset_spec for dependencies
memo_id = (id(manifest), unique_id, id(project))
# Don't initialize this in the constructor in case a subclass does not call __init__
if not hasattr(self, "_resolved_specs"):
self._resolved_specs = {}
if memo_id in self._resolved_specs:
return self._resolved_specs[memo_id]
group_props = {group["name"]: group for group in manifest.get("groups", {}).values()}
resource_props = self.get_resource_props(manifest, unique_id)
# calculate the dependencies for the asset
upstream_ids = get_upstream_unique_ids(manifest, resource_props)
deps = [
AssetDep(
asset=self.get_asset_spec(manifest, upstream_id, project).key,
partition_mapping=self.get_partition_mapping(
resource_props, self.get_resource_props(manifest, upstream_id)
),
)
for upstream_id in upstream_ids
]
self_partition_mapping = self.get_partition_mapping(resource_props, resource_props)
if self_partition_mapping and has_self_dependency(resource_props):
deps.append(
AssetDep(
asset=self.get_asset_key(resource_props),
partition_mapping=self_partition_mapping,
)
)
resource_group_props = group_props.get(resource_props.get("group") or "")
if resource_group_props:
owners_resource_props = {
**resource_props,
# this overrides the group key in resource_props, which is bad as
# this key is not always empty and this dictionary generally differs
# in structure from other inputs, but this is necessary for backcompat
**({"group": resource_group_props} if resource_group_props else {}),
}
else:
owners_resource_props = resource_props
spec = AssetSpec(
key=self.get_asset_key(resource_props),
deps=deps,
description=self.get_description(resource_props),
metadata=self.get_metadata(resource_props),
skippable=True,
group_name=self.get_group_name(resource_props),
code_version=self.get_code_version(resource_props),
automation_condition=self.get_automation_condition(resource_props),
owners=self.get_owners(owners_resource_props),
tags=self.get_tags(resource_props),
kinds={"dbt", manifest.get("metadata", {}).get("adapter_type", "dbt")},
partitions_def=self.get_partitions_def(resource_props),
)
# add integration-specific metadata to the spec
spec = spec.merge_attributes(
metadata={
DAGSTER_DBT_MANIFEST_METADATA_KEY: DbtManifestWrapper(manifest=manifest),
DAGSTER_DBT_TRANSLATOR_METADATA_KEY: self,
DAGSTER_DBT_UNIQUE_ID_METADATA_KEY: resource_props["unique_id"],
**({DAGSTER_DBT_PROJECT_METADATA_KEY: project} if project else {}),
}
)
if self.settings.enable_code_references:
if not project:
raise DagsterInvalidDefinitionError(
"enable_code_references requires a DbtProject to be supplied"
" to the @dbt_assets decorator."
)
spec = spec.replace_attributes(
metadata=_attach_sql_model_code_reference(
existing_metadata=spec.metadata,
dbt_resource_props=resource_props,
project=project,
)
)
self._resolved_specs[memo_id] = spec
return self._resolved_specs[memo_id]
def get_asset_check_spec(
self,
asset_spec: AssetSpec,
manifest: Mapping[str, Any],
unique_id: str,
project: Optional["DbtProject"],
) -> Optional[AssetCheckSpec]:
return default_asset_check_fn(
manifest=manifest,
dagster_dbt_translator=self,
asset_key=asset_spec.key,
test_unique_id=unique_id,
project=project,
)
@public
def get_asset_key(self, dbt_resource_props: Mapping[str, Any]) -> AssetKey:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster asset key that represents that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom asset key for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
AssetKey: The Dagster asset key for the dbt resource.
Examples:
Adding a prefix to the default asset key generated for each dbt resource:
.. code-block:: python
from typing import Any, Mapping
from dagster import AssetKey
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_asset_key(self, dbt_resource_props: Mapping[str, Any]) -> AssetKey:
return super().get_asset_key(dbt_resource_props).with_prefix("prefix")
Adding a prefix to the default asset key generated for each dbt resource, but only for dbt sources:
.. code-block:: python
from typing import Any, Mapping
from dagster import AssetKey
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_asset_key(self, dbt_resource_props: Mapping[str, Any]) -> AssetKey:
asset_key = super().get_asset_key(dbt_resource_props)
if dbt_resource_props["resource_type"] == "source":
asset_key = asset_key.with_prefix("my_prefix")
return asset_key
"""
return default_asset_key_fn(dbt_resource_props)
@public
@beta(emit_runtime_warning=False)
def get_partition_mapping(
self,
dbt_resource_props: Mapping[str, Any],
dbt_parent_resource_props: Mapping[str, Any],
) -> Optional[PartitionMapping]:
"""A function that takes two dictionaries: the first, representing properties of a dbt
resource; and the second, representing the properties of a parent dependency to the first
dbt resource. The function returns the Dagster partition mapping for the dbt dependency.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom partition mapping for a dbt dependency.
Args:
dbt_resource_props (Mapping[str, Any]):
A dictionary representing the dbt child resource.
dbt_parent_resource_props (Mapping[str, Any]):
A dictionary representing the dbt parent resource, in relationship to the child.
Returns:
Optional[PartitionMapping]:
The Dagster partition mapping for the dbt resource. If None is returned, the
default partition mapping will be used.
"""
return None
@public
def get_description(self, dbt_resource_props: Mapping[str, Any]) -> str:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster description for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom description for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
str: The description for the dbt resource.
Examples:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_description(self, dbt_resource_props: Mapping[str, Any]) -> str:
return "custom description"
"""
return default_description_fn(dbt_resource_props)
@public
def get_metadata(self, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster metadata for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom metadata for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Mapping[str, Any]: A dictionary representing the Dagster metadata for the dbt resource.
Examples:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_metadata(self, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:
return {"custom": "metadata"}
"""
return default_metadata_from_dbt_resource_props(dbt_resource_props)
@public
def get_tags(self, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, str]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster tags for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
dbt tags are strings, but Dagster tags are key-value pairs. To bridge this divide, the dbt
tag string is used as the Dagster tag key, and the Dagster tag value is set to the empty
string, "".
Any dbt tags that don't match Dagster's supported tag key format (e.g. they contain
unsupported characters) will be ignored.
This method can be overridden to provide custom tags for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Mapping[str, str]: A dictionary representing the Dagster tags for the dbt resource.
Examples:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_tags(self, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, str]:
return {"custom": "tag"}
"""
tags = dbt_resource_props.get("tags", [])
return {tag: "" for tag in tags if is_valid_tag_key(tag)}
@public
def get_group_name(self, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster group name for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom group name for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Optional[str]: A Dagster group name.
Examples:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_group_name(self, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:
return "custom_group_prefix" + dbt_resource_props.get("config", {}).get("group")
"""
return default_group_from_dbt_resource_props(dbt_resource_props)
@public
def get_code_version(self, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster code version for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom code version for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Optional[str]: A Dagster code version.
Examples:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_code_version(self, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:
return dbt_resource_props["checksum"]["checksum"]
"""
return default_code_version_fn(dbt_resource_props)
@public
def get_owners(self, dbt_resource_props: Mapping[str, Any]) -> Optional[Sequence[str]]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster owners for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide custom owners for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Optional[Sequence[str]]: A set of Dagster owners.
Examples:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_owners(self, dbt_resource_props: Mapping[str, Any]) -> Optional[Sequence[str]]:
return ["user@owner.com", "team:team@owner.com"]
"""
return default_owners_from_dbt_resource_props(dbt_resource_props)
@public
@beta(emit_runtime_warning=False)
def get_auto_materialize_policy(
self, dbt_resource_props: Mapping[str, Any]
) -> Optional[AutoMaterializePolicy]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster :py:class:`dagster.AutoMaterializePolicy` for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom auto-materialize policy for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Optional[AutoMaterializePolicy]: A Dagster auto-materialize policy.
Examples:
Set a custom auto-materialize policy for all dbt resources:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_auto_materialize_policy(self, dbt_resource_props: Mapping[str, Any]) -> Optional[AutoMaterializePolicy]:
return AutoMaterializePolicy.eager()
Set a custom auto-materialize policy for dbt resources with a specific tag:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_auto_materialize_policy(self, dbt_resource_props: Mapping[str, Any]) -> Optional[AutoMaterializePolicy]:
auto_materialize_policy = None
if "my_custom_tag" in dbt_resource_props.get("tags", []):
auto_materialize_policy = AutoMaterializePolicy.eager()
return auto_materialize_policy
"""
return default_auto_materialize_policy_fn(dbt_resource_props)
@public
@beta(emit_runtime_warning=False)
def get_automation_condition(
self, dbt_resource_props: Mapping[str, Any]
) -> Optional[AutomationCondition]:
"""A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster :py:class:`dagster.AutoMaterializePolicy` for that resource.
Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents
a model, seed, snapshot or source in a given dbt project. You can learn more about dbt
resources and the properties available in this dictionary here:
https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details
This method can be overridden to provide a custom AutomationCondition for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Optional[AutoMaterializePolicy]: A Dagster auto-materialize policy.
Examples:
Set a custom AutomationCondition for all dbt resources:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_automation_condition(self, dbt_resource_props: Mapping[str, Any]) -> Optional[AutomationCondition]:
return AutomationCondition.eager()
Set a custom AutomationCondition for dbt resources with a specific tag:
.. code-block:: python
from typing import Any, Mapping
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_automation_condition(self, dbt_resource_props: Mapping[str, Any]) -> Optional[AutomationCondition]:
automation_condition = None
if "my_custom_tag" in dbt_resource_props.get("tags", []):
automation_condition = AutomationCondition.eager()
return automation_condition
"""
auto_materialize_policy = self.get_auto_materialize_policy(dbt_resource_props)
return (
auto_materialize_policy.to_automation_condition() if auto_materialize_policy else None
)
def get_partitions_def(
self, dbt_resource_props: Mapping[str, Any]
) -> Optional[PartitionsDefinition]:
"""[INTERNAL] A function that takes a dictionary representing properties of a dbt resource, and
returns the Dagster :py:class:`dagster.PartitionsDefinition` for that resource.
This method can be overridden to provide a custom PartitionsDefinition for a dbt resource.
Args:
dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.
Returns:
Optional[PartitionsDefinition]: A Dagster partitions definition.
Examples:
Set a custom AutomationCondition for dbt resources with a specific tag:
.. code-block:: python
from typing import Any, Mapping
from dagster import DailyPartitionsDefinition
from dagster_dbt import DagsterDbtTranslator
class CustomDagsterDbtTranslator(DagsterDbtTranslator):
def get_partitions_def(self, dbt_resource_props: Mapping[str, Any]) -> Optional[PartitionsDefinition]:
if "my_custom_tag" in dbt_resource_props.get("tags", []):
return DailyPartitionsDefinition(start_date="2022-01-01")
else:
return None
"""
return None
@dataclass
| DagsterDbtTranslator |
python | mlflow__mlflow | mlflow/entities/run_data.py | {
"start": 364,
"end": 3039
} | class ____(_MlflowObject):
"""
Run data (metrics and parameters).
"""
def __init__(self, metrics=None, params=None, tags=None):
"""Construct a new mlflow.entities.RunData instance.
Args:
metrics: List of mlflow.entities.Metric.
params: List of mlflow.entities.Param.
tags: List of mlflow.entities.RunTag.
"""
# Maintain the original list of metrics so that we can easily convert it back to
# protobuf
self._metric_objs = metrics or []
self._metrics = {metric.key: metric.value for metric in self._metric_objs}
self._params = {param.key: param.value for param in (params or [])}
self._tags = {tag.key: tag.value for tag in (tags or [])}
@property
def metrics(self):
"""
Dictionary of string key -> metric value for the current run.
For each metric key, the metric value with the latest timestamp is returned. In case there
are multiple values with the same latest timestamp, the maximum of these values is returned.
"""
return self._metrics
@property
def params(self):
"""Dictionary of param key (string) -> param value for the current run."""
return self._params
@property
def tags(self):
"""Dictionary of tag key (string) -> tag value for the current run."""
return self._tags
def _add_metric(self, metric):
self._metrics[metric.key] = metric.value
self._metric_objs.append(metric)
def _add_param(self, param):
self._params[param.key] = param.value
def _add_tag(self, tag):
self._tags[tag.key] = tag.value
def to_proto(self):
run_data = ProtoRunData()
run_data.metrics.extend([m.to_proto() for m in self._metric_objs])
run_data.params.extend([ProtoParam(key=key, value=val) for key, val in self.params.items()])
run_data.tags.extend([ProtoRunTag(key=key, value=val) for key, val in self.tags.items()])
return run_data
def to_dictionary(self):
return {
"metrics": self.metrics,
"params": self.params,
"tags": self.tags,
}
@classmethod
def from_proto(cls, proto):
run_data = cls()
# iterate proto and add metrics, params, and tags
for proto_metric in proto.metrics:
run_data._add_metric(Metric.from_proto(proto_metric))
for proto_param in proto.params:
run_data._add_param(Param.from_proto(proto_param))
for proto_tag in proto.tags:
run_data._add_tag(RunTag.from_proto(proto_tag))
return run_data
| RunData |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-maximum-path-sum.py | {
"start": 1189,
"end": 1683
} | class ____(object):
# @param root, a tree node
# @return an integer
def maxPathSum(self, root):
def dfs(node):
if not node:
return (float("-inf"), 0)
max_left, curr_left = dfs(node.left)
max_right, curr_right = dfs(node.right)
return (max(max_left, max_right, node.val+max(curr_left, 0)+max(curr_right, 0)),
node.val+max(curr_left, curr_right, 0))
return dfs(root)[0]
| Solution2 |
python | mahmoud__boltons | boltons/urlutils.py | {
"start": 36484,
"end": 55909
} | class ____(dict):
"""A MultiDict is a dictionary that can have multiple values per key
and the OrderedMultiDict (OMD) is a MultiDict that retains
original insertion order. Common use cases include:
* handling query strings parsed from URLs
* inverting a dictionary to create a reverse index (values to keys)
* stacking data from multiple dictionaries in a non-destructive way
The OrderedMultiDict constructor is identical to the built-in
:class:`dict`, and overall the API constitutes an intuitive
superset of the built-in type:
>>> omd = OrderedMultiDict()
>>> omd['a'] = 1
>>> omd['b'] = 2
>>> omd.add('a', 3)
>>> omd.get('a')
3
>>> omd.getlist('a')
[1, 3]
Some non-:class:`dict`-like behaviors also make an appearance,
such as support for :func:`reversed`:
>>> list(reversed(omd))
['b', 'a']
Note that unlike some other MultiDicts, this OMD gives precedence
to the most recent value added. ``omd['a']`` refers to ``3``, not
``1``.
>>> omd
OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
>>> omd.poplast('a')
3
>>> omd
OrderedMultiDict([('a', 1), ('b', 2)])
>>> omd.pop('a')
1
>>> omd
OrderedMultiDict([('b', 2)])
If you want a safe-to-modify or flat dictionary, use
:meth:`OrderedMultiDict.todict()`.
>>> from pprint import pprint as pp # preserve printed ordering
>>> omd = OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
>>> pp(omd.todict())
{'a': 3, 'b': 2}
>>> pp(omd.todict(multi=True))
{'a': [1, 3], 'b': [2]}
With ``multi=False``, items appear with the keys in to original
insertion order, alongside the most-recently inserted value for
that key.
>>> OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)]).items(multi=False)
[('a', 3), ('b', 2)]
.. warning::
``dict(omd)`` changed behavior `in Python 3.7
<https://bugs.python.org/issue34320>`_ due to changes made to
support the transition from :class:`collections.OrderedDict` to
the built-in dictionary being ordered. Before 3.7, the result
would be a new dictionary, with values that were lists, similar
to ``omd.todict(multi=True)`` (but only shallow-copy; the lists
were direct references to OMD internal structures). From 3.7
onward, the values became singular, like
``omd.todict(multi=False)``. For reliable cross-version
behavior, just use :meth:`~OrderedMultiDict.todict()`.
"""
def __new__(cls, *a, **kw):
ret = super().__new__(cls)
ret._clear_ll()
return ret
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError('%s expected at most 1 argument, got %s'
% (self.__class__.__name__, len(args)))
super().__init__()
if args:
self.update_extend(args[0])
if kwargs:
self.update(kwargs)
def __getstate__(self):
return list(self.iteritems(multi=True))
def __setstate__(self, state):
self.clear()
self.update_extend(state)
def _clear_ll(self):
try:
_map = self._map
except AttributeError:
_map = self._map = {}
self.root = []
_map.clear()
self.root[:] = [self.root, self.root, None]
def _insert(self, k, v):
root = self.root
cells = self._map.setdefault(k, [])
last = root[PREV]
cell = [last, root, k, v]
last[NEXT] = root[PREV] = cell
cells.append(cell)
def add(self, k, v):
"""Add a single value *v* under a key *k*. Existing values under *k*
are preserved.
"""
values = super().setdefault(k, [])
self._insert(k, v)
values.append(v)
def addlist(self, k, v):
"""Add an iterable of values underneath a specific key, preserving
any values already under that key.
>>> omd = OrderedMultiDict([('a', -1)])
>>> omd.addlist('a', range(3))
>>> omd
OrderedMultiDict([('a', -1), ('a', 0), ('a', 1), ('a', 2)])
Called ``addlist`` for consistency with :meth:`getlist`, but
tuples and other sequences and iterables work.
"""
if not v:
return
self_insert = self._insert
values = super().setdefault(k, [])
for subv in v:
self_insert(k, subv)
values.extend(v)
def get(self, k, default=None):
"""Return the value for key *k* if present in the dictionary, else
*default*. If *default* is not given, ``None`` is returned.
This method never raises a :exc:`KeyError`.
To get all values under a key, use :meth:`OrderedMultiDict.getlist`.
"""
return super().get(k, [default])[-1]
def getlist(self, k, default=_MISSING):
"""Get all values for key *k* as a list, if *k* is in the
dictionary, else *default*. The list returned is a copy and
can be safely mutated. If *default* is not given, an empty
:class:`list` is returned.
"""
try:
return super().__getitem__(k)[:]
except KeyError:
if default is _MISSING:
return []
return default
def clear(self):
"Empty the dictionary."
super().clear()
self._clear_ll()
def setdefault(self, k, default=_MISSING):
"""If key *k* is in the dictionary, return its value. If not, insert
*k* with a value of *default* and return *default*. *default*
defaults to ``None``. See :meth:`dict.setdefault` for more
information.
"""
if not super().__contains__(k):
self[k] = None if default is _MISSING else default
return self[k]
def copy(self):
"Return a shallow copy of the dictionary."
return self.__class__(self.iteritems(multi=True))
@classmethod
def fromkeys(cls, keys, default=None):
"""Create a dictionary from a list of keys, with all the values
set to *default*, or ``None`` if *default* is not set.
"""
return cls([(k, default) for k in keys])
def update(self, E, **F):
"""Add items from a dictionary or iterable (and/or keyword arguments),
overwriting values under an existing key. See
:meth:`dict.update` for more details.
"""
# E and F are throwback names to the dict() __doc__
if E is self:
return
self_add = self.add
if isinstance(E, OrderedMultiDict):
for k in E:
if k in self:
del self[k]
for k, v in E.iteritems(multi=True):
self_add(k, v)
elif callable(getattr(E, 'keys', None)):
for k in E.keys():
self[k] = E[k]
else:
seen = set()
seen_add = seen.add
for k, v in E:
if k not in seen and k in self:
del self[k]
seen_add(k)
self_add(k, v)
for k in F:
self[k] = F[k]
return
def update_extend(self, E, **F):
"""Add items from a dictionary, iterable, and/or keyword
arguments without overwriting existing items present in the
dictionary. Like :meth:`update`, but adds to existing keys
instead of overwriting them.
"""
if E is self:
iterator = iter(E.items())
elif isinstance(E, OrderedMultiDict):
iterator = E.iteritems(multi=True)
elif hasattr(E, 'keys'):
iterator = ((k, E[k]) for k in E.keys())
else:
iterator = E
self_add = self.add
for k, v in iterator:
self_add(k, v)
def __setitem__(self, k, v):
if super().__contains__(k):
self._remove_all(k)
self._insert(k, v)
super().__setitem__(k, [v])
def __getitem__(self, k):
return super().__getitem__(k)[-1]
def __delitem__(self, k):
super().__delitem__(k)
self._remove_all(k)
def __eq__(self, other):
if self is other:
return True
try:
if len(other) != len(self):
return False
except TypeError:
return False
if isinstance(other, OrderedMultiDict):
selfi = self.iteritems(multi=True)
otheri = other.iteritems(multi=True)
zipped_items = zip_longest(selfi, otheri, fillvalue=(None, None))
for (selfk, selfv), (otherk, otherv) in zipped_items:
if selfk != otherk or selfv != otherv:
return False
if not(next(selfi, _MISSING) is _MISSING
and next(otheri, _MISSING) is _MISSING):
# leftovers (TODO: watch for StopIteration?)
return False
return True
elif hasattr(other, 'keys'):
for selfk in self:
try:
other[selfk] == self[selfk]
except KeyError:
return False
return True
return False
def __ne__(self, other):
return not (self == other)
def __ior__(self, other):
self.update(other)
return self
def pop(self, k, default=_MISSING):
"""Remove all values under key *k*, returning the most-recently
inserted value. Raises :exc:`KeyError` if the key is not
present and no *default* is provided.
"""
try:
return self.popall(k)[-1]
except KeyError:
if default is _MISSING:
raise KeyError(k)
return default
def popall(self, k, default=_MISSING):
"""Remove all values under key *k*, returning them in the form of
a list. Raises :exc:`KeyError` if the key is not present and no
*default* is provided.
"""
super_self = super()
if super_self.__contains__(k):
self._remove_all(k)
if default is _MISSING:
return super_self.pop(k)
return super_self.pop(k, default)
def poplast(self, k=_MISSING, default=_MISSING):
"""Remove and return the most-recently inserted value under the key
*k*, or the most-recently inserted key if *k* is not
provided. If no values remain under *k*, it will be removed
from the OMD. Raises :exc:`KeyError` if *k* is not present in
the dictionary, or the dictionary is empty.
"""
if k is _MISSING:
if self:
k = self.root[PREV][KEY]
else:
if default is _MISSING:
raise KeyError('empty %r' % type(self))
return default
try:
self._remove(k)
except KeyError:
if default is _MISSING:
raise KeyError(k)
return default
values = super().__getitem__(k)
v = values.pop()
if not values:
super().__delitem__(k)
return v
def _remove(self, k):
values = self._map[k]
cell = values.pop()
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
if not values:
del self._map[k]
def _remove_all(self, k):
values = self._map[k]
while values:
cell = values.pop()
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
del self._map[k]
def iteritems(self, multi=False):
"""Iterate over the OMD's items in insertion order. By default,
yields only the most-recently inserted value for each key. Set
*multi* to ``True`` to get all inserted items.
"""
root = self.root
curr = root[NEXT]
if multi:
while curr is not root:
yield curr[KEY], curr[VALUE]
curr = curr[NEXT]
else:
for key in self.iterkeys():
yield key, self[key]
def iterkeys(self, multi=False):
"""Iterate over the OMD's keys in insertion order. By default, yields
each key once, according to the most recent insertion. Set
*multi* to ``True`` to get all keys, including duplicates, in
insertion order.
"""
root = self.root
curr = root[NEXT]
if multi:
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
else:
yielded = set()
yielded_add = yielded.add
while curr is not root:
k = curr[KEY]
if k not in yielded:
yielded_add(k)
yield k
curr = curr[NEXT]
def itervalues(self, multi=False):
"""Iterate over the OMD's values in insertion order. By default,
yields the most-recently inserted value per unique key. Set
*multi* to ``True`` to get all values according to insertion
order.
"""
for k, v in self.iteritems(multi=multi):
yield v
def todict(self, multi=False):
"""Gets a basic :class:`dict` of the items in this dictionary. Keys
are the same as the OMD, values are the most recently inserted
values for each key.
Setting the *multi* arg to ``True`` is yields the same
result as calling :class:`dict` on the OMD, except that all the
value lists are copies that can be safely mutated.
"""
if multi:
return {k: self.getlist(k) for k in self}
return {k: self[k] for k in self}
def sorted(self, key=None, reverse=False):
"""Similar to the built-in :func:`sorted`, except this method returns
a new :class:`OrderedMultiDict` sorted by the provided key
function, optionally reversed.
Args:
key (callable): A callable to determine the sort key of
each element. The callable should expect an **item**
(key-value pair tuple).
reverse (bool): Set to ``True`` to reverse the ordering.
>>> omd = OrderedMultiDict(zip(range(3), range(3)))
>>> omd.sorted(reverse=True)
OrderedMultiDict([(2, 2), (1, 1), (0, 0)])
Note that the key function receives an **item** (key-value
tuple), so the recommended signature looks like:
>>> omd = OrderedMultiDict(zip('hello', 'world'))
>>> omd.sorted(key=lambda i: i[1]) # i[0] is the key, i[1] is the val
OrderedMultiDict([('o', 'd'), ('l', 'l'), ('e', 'o'), ('l', 'r'), ('h', 'w')])
"""
cls = self.__class__
return cls(sorted(self.iteritems(multi=True), key=key, reverse=reverse))
def sortedvalues(self, key=None, reverse=False):
"""Returns a copy of the :class:`OrderedMultiDict` with the same keys
in the same order as the original OMD, but the values within
each keyspace have been sorted according to *key* and
*reverse*.
Args:
key (callable): A single-argument callable to determine
the sort key of each element. The callable should expect
an **item** (key-value pair tuple).
reverse (bool): Set to ``True`` to reverse the ordering.
>>> omd = OrderedMultiDict()
>>> omd.addlist('even', [6, 2])
>>> omd.addlist('odd', [1, 5])
>>> omd.add('even', 4)
>>> omd.add('odd', 3)
>>> somd = omd.sortedvalues()
>>> somd.getlist('even')
[2, 4, 6]
>>> somd.keys(multi=True) == omd.keys(multi=True)
True
>>> omd == somd
False
>>> somd
OrderedMultiDict([('even', 2), ('even', 4), ('odd', 1), ('odd', 3), ('even', 6), ('odd', 5)])
As demonstrated above, contents and key order are
retained. Only value order changes.
"""
try:
superself_iteritems = super().iteritems()
except AttributeError:
superself_iteritems = super().items()
# (not reverse) because they pop off in reverse order for reinsertion
sorted_val_map = {k: sorted(v, key=key, reverse=(not reverse))
for k, v in superself_iteritems}
ret = self.__class__()
for k in self.iterkeys(multi=True):
ret.add(k, sorted_val_map[k].pop())
return ret
def inverted(self):
"""Returns a new :class:`OrderedMultiDict` with values and keys
swapped, like creating dictionary transposition or reverse
index. Insertion order is retained and all keys and values
are represented in the output.
>>> omd = OMD([(0, 2), (1, 2)])
>>> omd.inverted().getlist(2)
[0, 1]
Inverting twice yields a copy of the original:
>>> omd.inverted().inverted()
OrderedMultiDict([(0, 2), (1, 2)])
"""
return self.__class__((v, k) for k, v in self.iteritems(multi=True))
def counts(self):
"""Returns a mapping from key to number of values inserted under that
key. Like :py:class:`collections.Counter`, but returns a new
:class:`OrderedMultiDict`.
"""
# Returns an OMD because Counter/OrderedDict may not be
# available, and neither Counter nor dict maintain order.
super_getitem = super().__getitem__
return self.__class__((k, len(super_getitem(k))) for k in self)
def keys(self, multi=False):
"""Returns a list containing the output of :meth:`iterkeys`. See
that method's docs for more details.
"""
return list(self.iterkeys(multi=multi))
def values(self, multi=False):
"""Returns a list containing the output of :meth:`itervalues`. See
that method's docs for more details.
"""
return list(self.itervalues(multi=multi))
def items(self, multi=False):
"""Returns a list containing the output of :meth:`iteritems`. See
that method's docs for more details.
"""
return list(self.iteritems(multi=multi))
def __iter__(self):
return self.iterkeys()
def __reversed__(self):
root = self.root
curr = root[PREV]
lengths = {}
lengths_sd = lengths.setdefault
get_values = super().__getitem__
while curr is not root:
k = curr[KEY]
vals = get_values(k)
if lengths_sd(k, 1) == len(vals):
yield k
lengths[k] += 1
curr = curr[PREV]
def __repr__(self):
cn = self.__class__.__name__
kvs = ', '.join([repr((k, v)) for k, v in self.iteritems(multi=True)])
return f'{cn}([{kvs}])'
def viewkeys(self):
"OMD.viewkeys() -> a set-like object providing a view on OMD's keys"
return KeysView(self)
def viewvalues(self):
"OMD.viewvalues() -> an object providing a view on OMD's values"
return ValuesView(self)
def viewitems(self):
"OMD.viewitems() -> a set-like object providing a view on OMD's items"
return ItemsView(self)
try:
# try to import the built-in one anyways
from .dictutils import OrderedMultiDict
except ImportError:
pass
OMD = OrderedMultiDict
| OrderedMultiDict |
python | GoogleCloudPlatform__python-docs-samples | functions/firebase/main_test.py | {
"start": 720,
"end": 4959
} | class ____:
pass
def test_rtdb(capsys):
data = {"admin": True, "delta": {"id": "my-data"}}
context = Context()
context.resource = "my-resource"
main.hello_rtdb(data, context)
out, _ = capsys.readouterr()
assert "Function triggered by change to: my-resource" in out
assert "Admin?: True" in out
assert "my-data" in out
def test_firestore(capsys):
context = Context()
context.resource = "my-resource"
data = {"oldValue": {"a": 1}, "value": {"b": 2}}
main.hello_firestore(data, context)
out, _ = capsys.readouterr()
assert "Function triggered by change to: my-resource" in out
assert json.dumps(data["oldValue"]) in out
assert json.dumps(data["value"]) in out
def test_auth(capsys):
date_string = datetime.now().isoformat()
data = {
"uid": "my-user",
"metadata": {"createdAt": date_string},
"email": "me@example.com",
}
main.hello_auth(data, None)
out, _ = capsys.readouterr()
assert "Function triggered by creation/deletion of user: my-user" in out
assert date_string in out
assert "Email: me@example.com" in out
@patch("main.client")
def test_make_upper_case(firestore_mock, capsys):
firestore_mock.collection = MagicMock(return_value=firestore_mock)
firestore_mock.document = MagicMock(return_value=firestore_mock)
firestore_mock.set = MagicMock(return_value=firestore_mock)
user_id = str(uuid.uuid4())
date_string = datetime.now().isoformat()
email_string = "{}@{}.com".format(uuid.uuid4(), uuid.uuid4())
data = {
"uid": user_id,
"metadata": {"createdAt": date_string},
"email": email_string,
"value": {"fields": {"original": {"stringValue": "foobar"}}},
}
context = UserDict()
context.resource = "/documents/some_collection/path/some/path"
main.make_upper_case(data, context)
out, _ = capsys.readouterr()
assert "Replacing value: foobar --> FOOBAR" in out
firestore_mock.collection.assert_called_with("some_collection")
firestore_mock.document.assert_called_with("path/some/path")
firestore_mock.set.assert_called_with({"original": "FOOBAR"})
@patch("main.client")
def test_make_upper_case_ignores_already_uppercased(firestore_mock, capsys):
firestore_mock.collection = MagicMock(return_value=firestore_mock)
firestore_mock.document = MagicMock(return_value=firestore_mock)
firestore_mock.set = MagicMock(return_value=firestore_mock)
user_id = str(uuid.uuid4())
date_string = datetime.now().isoformat()
email_string = "{}@{}.com".format(uuid.uuid4(), uuid.uuid4())
data = {
"uid": user_id,
"metadata": {"createdAt": date_string},
"email": email_string,
"value": {"fields": {"original": {"stringValue": "FOOBAR"}}},
}
context = UserDict()
context.resource = "/documents/some_collection/path/some/path"
main.make_upper_case(data, context)
out, _ = capsys.readouterr()
assert "Value is already upper-case." in out
firestore_mock.set.assert_not_called()
def test_analytics(capsys):
timestamp = int(datetime.utcnow().timestamp())
data = {
"eventDim": [
{"name": "my-event", "timestampMicros": f"{str(timestamp)}000000"}
],
"userDim": {
"deviceInfo": {"deviceModel": "Pixel"},
"geoInfo": {"city": "London", "country": "UK"},
},
}
context = Context()
context.resource = "my-resource"
main.hello_analytics(data, context)
out, _ = capsys.readouterr()
assert "Function triggered by the following event: my-resource" in out
assert f"Timestamp: {datetime.utcfromtimestamp(timestamp)}" in out
assert "Name: my-event" in out
assert "Device Model: Pixel" in out
assert "Location: London, UK" in out
def test_remote_config(capsys):
data = {
"updateOrigin": "CONSOLE",
"updateType": "INCREMENTAL_UPDATE",
"versionNumber": "1",
}
context = Context()
main.hello_remote_config(data, context)
out, _ = capsys.readouterr()
assert "Update type: INCREMENTAL_UPDATE" in out
assert "Origin: CONSOLE" in out
assert "Version: 1" in out
| Context |
python | sqlalchemy__sqlalchemy | test/orm/test_loading.py | {
"start": 5018,
"end": 6537
} | class ____(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_cursor_close_exception_raised_in_iteration(self):
"""test #8710"""
User = self.classes.User
s = fixture_session()
stmt = select(User).execution_options(yield_per=1)
result = s.execute(stmt)
raw_cursor = result.raw
for row in result:
with expect_raises_message(Exception, "whoops"):
for row in result:
raise Exception("whoops")
is_true(raw_cursor._soft_closed)
def test_cursor_close_w_failed_rowproc(self):
User = self.classes.User
s = fixture_session()
q = s.query(User)
ctx = q._compile_context()
cursor = mock.Mock()
ctx.compile_state._entities = [
mock.Mock(row_processor=mock.Mock(side_effect=Exception("boom")))
]
assert_raises(Exception, loading.instances, cursor, ctx)
assert cursor.close.called, "Cursor wasn't closed"
def test_row_proc_not_created(self):
User = self.classes.User
s = fixture_session()
q = s.query(User.id, User.name)
stmt = select(User.id)
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'users.name'",
q.from_statement(stmt).all,
)
| InstancesTest |
python | TheAlgorithms__Python | data_structures/binary_tree/diff_views_of_binary_tree.py | {
"start": 301,
"end": 4827
} | class ____:
val: int
left: TreeNode | None = None
right: TreeNode | None = None
def make_tree() -> TreeNode:
"""
>>> make_tree().val
3
"""
return TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
def binary_tree_right_side_view(root: TreeNode) -> list[int]:
r"""
Function returns the right side view of binary tree.
3 <- 3
/ \
9 20 <- 20
/ \
15 7 <- 7
>>> binary_tree_right_side_view(make_tree())
[3, 20, 7]
>>> binary_tree_right_side_view(None)
[]
"""
def depth_first_search(
root: TreeNode | None, depth: int, right_view: list[int]
) -> None:
"""
A depth first search preorder traversal to append the values at
right side of tree.
"""
if not root:
return
if depth == len(right_view):
right_view.append(root.val)
depth_first_search(root.right, depth + 1, right_view)
depth_first_search(root.left, depth + 1, right_view)
right_view: list = []
if not root:
return right_view
depth_first_search(root, 0, right_view)
return right_view
def binary_tree_left_side_view(root: TreeNode) -> list[int]:
r"""
Function returns the left side view of binary tree.
3 -> 3
/ \
9 -> 9 20
/ \
15 -> 15 7
>>> binary_tree_left_side_view(make_tree())
[3, 9, 15]
>>> binary_tree_left_side_view(None)
[]
"""
def depth_first_search(
root: TreeNode | None, depth: int, left_view: list[int]
) -> None:
"""
A depth first search preorder traversal to append the values
at left side of tree.
"""
if not root:
return
if depth == len(left_view):
left_view.append(root.val)
depth_first_search(root.left, depth + 1, left_view)
depth_first_search(root.right, depth + 1, left_view)
left_view: list = []
if not root:
return left_view
depth_first_search(root, 0, left_view)
return left_view
def binary_tree_top_side_view(root: TreeNode) -> list[int]:
r"""
Function returns the top side view of binary tree.
9 3 20 7
⬇ ⬇ ⬇ ⬇
3
/ \
9 20
/ \
15 7
>>> binary_tree_top_side_view(make_tree())
[9, 3, 20, 7]
>>> binary_tree_top_side_view(None)
[]
"""
def breadth_first_search(root: TreeNode, top_view: list[int]) -> None:
"""
A breadth first search traversal with defaultdict ds to append
the values of tree from top view
"""
queue = [(root, 0)]
lookup = defaultdict(list)
while queue:
first = queue.pop(0)
node, hd = first
lookup[hd].append(node.val)
if node.left:
queue.append((node.left, hd - 1))
if node.right:
queue.append((node.right, hd + 1))
for pair in sorted(lookup.items(), key=lambda each: each[0]):
top_view.append(pair[1][0])
top_view: list = []
if not root:
return top_view
breadth_first_search(root, top_view)
return top_view
def binary_tree_bottom_side_view(root: TreeNode) -> list[int]:
r"""
Function returns the bottom side view of binary tree
3
/ \
9 20
/ \
15 7
↑ ↑ ↑ ↑
9 15 20 7
>>> binary_tree_bottom_side_view(make_tree())
[9, 15, 20, 7]
>>> binary_tree_bottom_side_view(None)
[]
"""
from collections import defaultdict
def breadth_first_search(root: TreeNode, bottom_view: list[int]) -> None:
"""
A breadth first search traversal with defaultdict ds to append
the values of tree from bottom view
"""
queue = [(root, 0)]
lookup = defaultdict(list)
while queue:
first = queue.pop(0)
node, hd = first
lookup[hd].append(node.val)
if node.left:
queue.append((node.left, hd - 1))
if node.right:
queue.append((node.right, hd + 1))
for pair in sorted(lookup.items(), key=lambda each: each[0]):
bottom_view.append(pair[1][-1])
bottom_view: list = []
if not root:
return bottom_view
breadth_first_search(root, bottom_view)
return bottom_view
if __name__ == "__main__":
import doctest
doctest.testmod()
| TreeNode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 25533,
"end": 28312
} | class ____(HttpRequester):
"""
Custom HTTP requester for custom query streams.
"""
parameters: Mapping[str, Any]
def __post_init__(self, parameters: Mapping[str, Any]):
super().__post_init__(parameters=parameters)
self.query = GAQL.parse(parameters.get("query"))
@staticmethod
def is_metrics_in_custom_query(query: GAQL) -> bool:
for field in query.fields:
if field.split(".")[0] == "metrics":
return True
return False
@staticmethod
def is_custom_query_incremental(query: GAQL) -> bool:
time_segment_in_select, time_segment_in_where = ["segments.date" in clause for clause in [query.fields, query.where]]
return time_segment_in_select and not time_segment_in_where
@staticmethod
def _insert_segments_date_expr(query: GAQL, start_date: str, end_date: str) -> GAQL:
if "segments.date" not in query.fields:
query = query.append_field("segments.date")
condition = f"segments.date BETWEEN '{start_date}' AND '{end_date}'"
if query.where:
return query.set_where(query.where + " AND " + condition)
return query.set_where(condition)
def get_request_body_json(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
query = self._build_query(stream_slice)
return {"query": query}
def get_request_headers(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {
"developer-token": self.config["credentials"]["developer_token"],
"login-customer-id": stream_slice["parent_slice"]["customer_id"],
}
def _build_query(self, stream_slice: StreamSlice) -> str:
is_incremental = self.is_custom_query_incremental(self.query)
if is_incremental:
start_date = stream_slice["start_time"]
end_date = stream_slice["end_time"]
return str(self._insert_segments_date_expr(self.query, start_date, end_date))
else:
return str(self.query)
def _get_resource_name(self) -> str:
"""
Extract the resource name from the `FROM` clause of the query.
e.g. Parses a query "SELECT field1, field2, field3 FROM table" and returns "table".
"""
query_upper = self.query.upper()
from_index = query_upper.find("FROM")
return self.query[from_index + 4 :].strip()
@dataclass()
| CustomGAQueryHttpRequester |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 13886,
"end": 23847
} | class ____(NonStrictDataModel):
"""
:param id: Worker ID
:type id: str
:param user: Associated user (under whose credentials are used by the worker
daemon)
:type user: IdNameEntry
:param company: Associated company
:type company: IdNameEntry
:param ip: IP of the worker
:type ip: str
:param register_time: Registration time
:type register_time: datetime.datetime
:param last_activity_time: Last activity time (even if an error occurred)
:type last_activity_time: datetime.datetime
:param last_report_time: Last successful report time
:type last_report_time: datetime.datetime
:param task: Task currently being run by the worker
:type task: CurrentTaskEntry
:param project: Project in which currently executing task resides
:type project: IdNameEntry
:param queue: Queue from which running task was taken
:type queue: QueueEntry
:param queues: List of queues on which the worker is listening
:type queues: Sequence[QueueEntry]
:param tags: User tags for the worker
:type tags: Sequence[str]
"""
_schema = {
"properties": {
"company": {
"description": "Associated company",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
"id": {"description": "Worker ID", "type": ["string", "null"]},
"ip": {"description": "IP of the worker", "type": ["string", "null"]},
"last_activity_time": {
"description": "Last activity time (even if an error occurred)",
"format": "date-time",
"type": ["string", "null"],
},
"last_report_time": {
"description": "Last successful report time",
"format": "date-time",
"type": ["string", "null"],
},
"project": {
"description": "Project in which currently executing task resides",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
"queue": {
"description": "Queue from which running task was taken",
"oneOf": [{"$ref": "#/definitions/queue_entry"}, {"type": "null"}],
},
"queues": {
"description": "List of queues on which the worker is listening",
"items": {"$ref": "#/definitions/queue_entry"},
"type": ["array", "null"],
},
"register_time": {
"description": "Registration time",
"format": "date-time",
"type": ["string", "null"],
},
"tags": {
"description": "User tags for the worker",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task currently being run by the worker",
"oneOf": [
{"$ref": "#/definitions/current_task_entry"},
{"type": "null"},
],
},
"user": {
"description": "Associated user (under whose credentials are used by the worker daemon)",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
user: Any = None,
company: Any = None,
ip: Optional[str] = None,
register_time: Optional[str] = None,
last_activity_time: Optional[str] = None,
last_report_time: Optional[str] = None,
task: Any = None,
project: Any = None,
queue: Any = None,
queues: Optional[List[Any]] = None,
tags: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(Worker, self).__init__(**kwargs)
self.id = id
self.user = user
self.company = company
self.ip = ip
self.register_time = register_time
self.last_activity_time = last_activity_time
self.last_report_time = last_report_time
self.task = task
self.project = project
self.queue = queue
self.queues = queues
self.tags = tags
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("user")
def user(self) -> Any:
return self._property_user
@user.setter
def user(self, value: Any) -> None:
if value is None:
self._property_user = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "user", IdNameEntry)
self._property_user = value
@schema_property("company")
def company(self) -> Any:
return self._property_company
@company.setter
def company(self, value: Any) -> None:
if value is None:
self._property_company = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "company", IdNameEntry)
self._property_company = value
@schema_property("ip")
def ip(self) -> Optional[str]:
return self._property_ip
@ip.setter
def ip(self, value: Optional[str]) -> None:
if value is None:
self._property_ip = None
return
self.assert_isinstance(value, "ip", six.string_types)
self._property_ip = value
@schema_property("register_time")
def register_time(self) -> Optional[str]:
return self._property_register_time
@register_time.setter
def register_time(self, value: Optional[str]) -> None:
if value is None:
self._property_register_time = None
return
self.assert_isinstance(value, "register_time", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_register_time = value
@schema_property("last_activity_time")
def last_activity_time(self) -> Optional[str]:
return self._property_last_activity_time
@last_activity_time.setter
def last_activity_time(self, value: Optional[str]) -> None:
if value is None:
self._property_last_activity_time = None
return
self.assert_isinstance(value, "last_activity_time", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_activity_time = value
@schema_property("last_report_time")
def last_report_time(self) -> Optional[str]:
return self._property_last_report_time
@last_report_time.setter
def last_report_time(self, value: Optional[str]) -> None:
if value is None:
self._property_last_report_time = None
return
self.assert_isinstance(value, "last_report_time", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_report_time = value
@schema_property("task")
def task(self) -> Any:
return self._property_task
@task.setter
def task(self, value: Any) -> None:
if value is None:
self._property_task = None
return
if isinstance(value, dict):
value = CurrentTaskEntry.from_dict(value)
else:
self.assert_isinstance(value, "task", CurrentTaskEntry)
self._property_task = value
@schema_property("project")
def project(self) -> Any:
return self._property_project
@project.setter
def project(self, value: Any) -> None:
if value is None:
self._property_project = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "project", IdNameEntry)
self._property_project = value
@schema_property("queue")
def queue(self) -> Any:
return self._property_queue
@queue.setter
def queue(self, value: Any) -> None:
if value is None:
self._property_queue = None
return
if isinstance(value, dict):
value = QueueEntry.from_dict(value)
else:
self.assert_isinstance(value, "queue", QueueEntry)
self._property_queue = value
@schema_property("queues")
def queues(self) -> Optional[List[Any]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [QueueEntry.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "queues", QueueEntry, is_array=True)
self._property_queues = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
| Worker |
python | pandas-dev__pandas | pandas/tests/indexing/test_iloc.py | {
"start": 51669,
"end": 54098
} | class ____:
def test_iloc(self):
ser = Series(
np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2))
)
ser_original = ser.copy()
for i in range(len(ser)):
result = ser.iloc[i]
exp = ser[ser.index[i]]
tm.assert_almost_equal(result, exp)
# pass a slice
result = ser.iloc[slice(1, 3)]
expected = ser.loc[2:4]
tm.assert_series_equal(result, expected)
# test slice is a view
with tm.assert_produces_warning(None):
# GH#45324 make sure we aren't giving a spurious FutureWarning
result[:] = 0
tm.assert_series_equal(ser, ser_original)
# list of integers
result = ser.iloc[[0, 2, 3, 4, 5]]
expected = ser.reindex(ser.index[[0, 2, 3, 4, 5]])
tm.assert_series_equal(result, expected)
def test_iloc_getitem_nonunique(self):
ser = Series([0, 1, 2], index=[0, 1, 0])
assert ser.iloc[2] == 2
def test_iloc_setitem_pure_position_based(self):
# GH#22046
ser1 = Series([1, 2, 3])
ser2 = Series([4, 5, 6], index=[1, 0, 2])
ser1.iloc[1:3] = ser2.iloc[1:3]
expected = Series([1, 5, 6])
tm.assert_series_equal(ser1, expected)
def test_iloc_nullable_int64_size_1_nan(self):
# GH 31861
result = DataFrame({"a": ["test"], "b": [np.nan]})
ser = Series([NA], name="b", dtype="Int64")
with pytest.raises(TypeError, match="Invalid value"):
result.loc[:, "b"] = ser
def test_iloc_arrow_extension_array(self):
# GH#61311
pytest.importorskip("pyarrow")
df = DataFrame({"a": [1, 2], "c": [0, 2], "d": ["c", "a"]})
df_arrow = DataFrame(
{"a": [1, 2], "c": [0, 2], "d": ["c", "a"]}
).convert_dtypes(dtype_backend="pyarrow")
expected = df.iloc[:, df["c"]]
result = df_arrow.iloc[:, df_arrow["c"]]
tm.assert_frame_equal(result, expected, check_dtype=False)
@td.skip_if_no("pyarrow")
def test_setitem_pyarrow_int_series(self):
# GH#62462
ser = Series([1, 2, 3], dtype="int64[pyarrow]")
idx = Index([0, 1])
vals = Series([7, 8], dtype="int64[pyarrow]")
ser.iloc[idx] = vals
expected = Series([7, 8, 3], dtype="int64[pyarrow]")
tm.assert_series_equal(ser, expected)
| TestILocSeries |
python | pypa__pip | src/pip/_internal/resolution/resolvelib/base.py | {
"start": 782,
"end": 2306
} | class ____:
specifier: SpecifierSet
hashes: Hashes
links: frozenset[Link]
@classmethod
def empty(cls) -> Constraint:
return Constraint(SpecifierSet(), Hashes(), frozenset())
@classmethod
def from_ireq(cls, ireq: InstallRequirement) -> Constraint:
links = frozenset([ireq.link]) if ireq.link else frozenset()
return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links)
def __bool__(self) -> bool:
return bool(self.specifier) or bool(self.hashes) or bool(self.links)
def __and__(self, other: InstallRequirement) -> Constraint:
if not isinstance(other, InstallRequirement):
return NotImplemented
specifier = self.specifier & other.specifier
hashes = self.hashes & other.hashes(trust_internet=False)
links = self.links
if other.link:
links = links.union([other.link])
return Constraint(specifier, hashes, links)
def is_satisfied_by(self, candidate: Candidate) -> bool:
# Reject if there are any mismatched URL constraints on this package.
if self.links and not all(_match_link(link, candidate) for link in self.links):
return False
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
return self.specifier.contains(candidate.version, prereleases=True)
| Constraint |
python | PyCQA__bandit | tests/unit/core/test_context.py | {
"start": 163,
"end": 10442
} | class ____(testtools.TestCase):
def test_context_create(self):
ref_context = mock.Mock()
new_context = context.Context(context_object=ref_context)
self.assertEqual(ref_context, new_context._context)
new_context = context.Context()
self.assertIsInstance(new_context._context, dict)
def test_repr(self):
ref_object = dict(spam="eggs")
expected_repr = f"<Context {ref_object}>"
new_context = context.Context(context_object=ref_object)
self.assertEqual(expected_repr, repr(new_context))
@mock.patch("bandit.core.context.Context._get_literal_value")
def test_call_args(self, get_literal_value):
get_literal_value.return_value = "eggs"
ref_call = mock.Mock()
ref_call.args = [mock.Mock(attr="spam"), "eggs"]
ref_context = dict(call=ref_call)
new_context = context.Context(context_object=ref_context)
expected_args = ["spam", "eggs"]
self.assertListEqual(expected_args, new_context.call_args)
def test_call_args_count(self):
ref_call = mock.Mock()
ref_call.args = ["spam", "eggs"]
ref_context = dict(call=ref_call)
new_context = context.Context(context_object=ref_context)
self.assertEqual(len(ref_call.args), new_context.call_args_count)
ref_context = dict(call={})
new_context = context.Context(context_object=ref_context)
self.assertIsNone(new_context.call_args_count)
new_context = context.Context()
self.assertIsNone(new_context.call_args_count)
def test_call_function_name(self):
expected_string = "spam"
ref_context = dict(name=expected_string)
new_context = context.Context(context_object=ref_context)
self.assertEqual(expected_string, new_context.call_function_name)
new_context = context.Context()
self.assertIsNone(new_context.call_function_name)
def test_call_function_name_qual(self):
expected_string = "spam"
ref_context = dict(qualname=expected_string)
new_context = context.Context(context_object=ref_context)
self.assertEqual(expected_string, new_context.call_function_name_qual)
new_context = context.Context()
self.assertIsNone(new_context.call_function_name_qual)
@mock.patch("bandit.core.context.Context._get_literal_value")
def test_call_keywords(self, get_literal_value):
get_literal_value.return_value = "eggs"
ref_keyword1 = mock.Mock(arg="arg1", value=mock.Mock(attr="spam"))
ref_keyword2 = mock.Mock(arg="arg2", value="eggs")
ref_call = mock.Mock()
ref_call.keywords = [ref_keyword1, ref_keyword2]
ref_context = dict(call=ref_call)
new_context = context.Context(context_object=ref_context)
expected_dict = dict(arg1="spam", arg2="eggs")
self.assertDictEqual(expected_dict, new_context.call_keywords)
ref_context = dict(call=None)
new_context = context.Context(context_object=ref_context)
self.assertIsNone(new_context.call_keywords)
new_context = context.Context()
self.assertIsNone(new_context.call_keywords)
def test_node(self):
expected_node = "spam"
ref_context = dict(node=expected_node)
new_context = context.Context(context_object=ref_context)
self.assertEqual(expected_node, new_context.node)
new_context = context.Context()
self.assertIsNone(new_context.node)
def test_string_val(self):
expected_string = "spam"
ref_context = dict(str=expected_string)
new_context = context.Context(context_object=ref_context)
self.assertEqual(expected_string, new_context.string_val)
new_context = context.Context()
self.assertIsNone(new_context.string_val)
def test_statement(self):
expected_string = "spam"
ref_context = dict(statement=expected_string)
new_context = context.Context(context_object=ref_context)
self.assertEqual(expected_string, new_context.statement)
new_context = context.Context()
self.assertIsNone(new_context.statement)
@mock.patch("bandit.core.utils.get_qual_attr")
def test_function_def_defaults_qual(self, get_qual_attr):
get_qual_attr.return_value = "spam"
ref_node = mock.Mock(args=mock.Mock(defaults=["spam"]))
ref_context = dict(node=ref_node, import_aliases=None)
new_context = context.Context(context_object=ref_context)
self.assertListEqual(["spam"], new_context.function_def_defaults_qual)
ref_node = mock.Mock(args=mock.Mock(defaults=[]))
ref_context = dict(node=ref_node, import_aliases=None)
new_context = context.Context(context_object=ref_context)
self.assertListEqual([], new_context.function_def_defaults_qual)
new_context = context.Context()
self.assertListEqual([], new_context.function_def_defaults_qual)
def test__get_literal_value(self):
new_context = context.Context()
value = ast.Constant(42)
expected = value.value
self.assertEqual(expected, new_context._get_literal_value(value))
value = ast.Constant("spam")
expected = value.value
self.assertEqual(expected, new_context._get_literal_value(value))
value = ast.List([ast.Constant("spam"), ast.Constant(42)], ast.Load())
expected = [ast.Constant("spam").value, ast.Constant(42).value]
self.assertListEqual(expected, new_context._get_literal_value(value))
value = ast.Tuple([ast.Constant("spam"), ast.Constant(42)], ast.Load())
expected = (ast.Constant("spam").value, ast.Constant(42).value)
self.assertTupleEqual(expected, new_context._get_literal_value(value))
value = ast.Set([ast.Constant("spam"), ast.Constant(42)])
expected = {ast.Constant("spam").value, ast.Constant(42).value}
self.assertSetEqual(expected, new_context._get_literal_value(value))
value = ast.Dict(["spam", "eggs"], [42, "foo"])
expected = dict(spam=42, eggs="foo")
self.assertDictEqual(expected, new_context._get_literal_value(value))
value = ast.Name("spam", ast.Load())
expected = value.id
self.assertEqual(expected, new_context._get_literal_value(value))
value = ast.Constant(b"spam")
expected = value.value
self.assertEqual(expected, new_context._get_literal_value(value))
self.assertIsNone(new_context._get_literal_value(None))
@mock.patch(
"bandit.core.context.Context.call_keywords",
new_callable=mock.PropertyMock,
)
def test_check_call_arg_value(self, call_keywords):
new_context = context.Context()
call_keywords.return_value = dict(spam="eggs")
self.assertTrue(new_context.check_call_arg_value("spam", "eggs"))
self.assertTrue(
new_context.check_call_arg_value("spam", ["spam", "eggs"])
)
self.assertFalse(new_context.check_call_arg_value("spam", "spam"))
self.assertFalse(new_context.check_call_arg_value("spam"))
self.assertFalse(new_context.check_call_arg_value("eggs"))
new_context = context.Context()
self.assertIsNone(new_context.check_call_arg_value(None))
@mock.patch(
"bandit.core.context.Context.node", new_callable=mock.PropertyMock
)
def test_get_lineno_for_call_arg(self, node):
expected_lineno = 42
keyword1 = mock.Mock(
arg="spam", value=mock.Mock(lineno=expected_lineno)
)
node.return_value = mock.Mock(keywords=[keyword1])
new_context = context.Context()
actual_lineno = new_context.get_lineno_for_call_arg("spam")
self.assertEqual(expected_lineno, actual_lineno)
new_context = context.Context()
missing_lineno = new_context.get_lineno_for_call_arg("eggs")
self.assertIsNone(missing_lineno)
def test_get_call_arg_at_position(self):
expected_arg = "spam"
ref_call = mock.Mock()
ref_call.args = [ast.Constant(expected_arg)]
ref_context = dict(call=ref_call)
new_context = context.Context(context_object=ref_context)
self.assertEqual(expected_arg, new_context.get_call_arg_at_position(0))
self.assertIsNone(new_context.get_call_arg_at_position(1))
ref_call = mock.Mock()
ref_call.args = []
ref_context = dict(call=ref_call)
new_context = context.Context(context_object=ref_context)
self.assertIsNone(new_context.get_call_arg_at_position(0))
new_context = context.Context()
self.assertIsNone(new_context.get_call_arg_at_position(0))
def test_is_module_being_imported(self):
ref_context = dict(module="spam")
new_context = context.Context(context_object=ref_context)
self.assertTrue(new_context.is_module_being_imported("spam"))
self.assertFalse(new_context.is_module_being_imported("eggs"))
new_context = context.Context()
self.assertFalse(new_context.is_module_being_imported("spam"))
def test_is_module_imported_exact(self):
ref_context = dict(imports=["spam"])
new_context = context.Context(context_object=ref_context)
self.assertTrue(new_context.is_module_imported_exact("spam"))
self.assertFalse(new_context.is_module_imported_exact("eggs"))
new_context = context.Context()
self.assertFalse(new_context.is_module_being_imported("spam"))
def test_is_module_imported_like(self):
ref_context = dict(imports=[["spam"], ["eggs"]])
new_context = context.Context(context_object=ref_context)
self.assertTrue(new_context.is_module_imported_like("spam"))
self.assertFalse(new_context.is_module_imported_like("bacon"))
new_context = context.Context()
self.assertFalse(new_context.is_module_imported_like("spam"))
def test_filename(self):
ref_context = dict(filename="spam.py")
new_context = context.Context(context_object=ref_context)
self.assertEqual(new_context.filename, "spam.py")
new_context = context.Context()
self.assertIsNone(new_context.filename)
| ContextTests |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 254009,
"end": 254413
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "commit", "ref")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
commit = sgqlc.types.Field("Commit", graphql_name="commit")
ref = sgqlc.types.Field("Ref", graphql_name="ref")
| CreateCommitOnBranchPayload |
python | pytorch__pytorch | test/distributed/_shard/sharding_plan/test_sharding_plan.py | {
"start": 1175,
"end": 1696
} | class ____(ShardingPlanner):
dim = 0
devices = []
def __init__(self, chunk_dim=0, device_count=0):
self.dim = chunk_dim
self.devices = [f"rank:{i}/cuda:{i}" for i in range(device_count)]
def build_plan(self, module: nn.Module) -> ShardingPlan:
named_params = module.named_parameters()
plan = {}
for name, _ in named_params:
plan[name] = ChunkShardingSpec(self.dim, placements=self.devices)
return ShardingPlan(plan=plan)
| ChunkAllShardingPlanner |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 15545,
"end": 15609
} | class ____(FacetField, DateTimeField):
pass
| FacetDateTimeField |
python | doocs__leetcode | solution/1400-1499/1415.The k-th Lexicographical String of All Happy Strings of Length n/Solution.py | {
"start": 0,
"end": 482
} | class ____:
def getHappyString(self, n: int, k: int) -> str:
def dfs():
if len(s) == n:
ans.append("".join(s))
return
if len(ans) >= k:
return
for c in "abc":
if not s or s[-1] != c:
s.append(c)
dfs()
s.pop()
ans = []
s = []
dfs()
return "" if len(ans) < k else ans[k - 1]
| Solution |
python | PrefectHQ__prefect | tests/blocks/test_abstract.py | {
"start": 11248,
"end": 12933
} | class ____:
def test_secret_block_is_abstract(self):
with pytest.raises(
TypeError, match="Can't instantiate abstract class SecretBlock"
):
SecretBlock()
def test_secret_block_implementation(self, caplog):
class ASecretBlock(SecretBlock):
secret_name: str
def __init__(self, secret_name: str):
super().__init__(secret_name=secret_name)
self._secrets = {}
def read_secret(self):
if self.secret_name not in self._secrets:
raise KeyError("Secret does not exist")
return self._secrets[self.secret_name]
def write_secret(self, secret_value):
if self.secret_name in self._secrets:
raise ValueError("Secret already exists")
self._secrets[self.secret_name] = secret_value
def update_secret(self, secret_value):
self._secrets[self.secret_name] = secret_value
def delete_secret(self):
del self._secrets[self.secret_name]
a_secret_block = ASecretBlock(secret_name="secret_name")
a_secret_block.write_secret("hello")
assert a_secret_block.read_secret() == "hello"
with pytest.raises(ValueError, match="Secret already exists"):
a_secret_block.write_secret("hello again")
a_secret_block.update_secret("hello again")
assert a_secret_block.read_secret() == "hello again"
a_secret_block.delete_secret()
with pytest.raises(KeyError, match="Secret does not exist"):
assert a_secret_block.read_secret()
| TestSecretBlock |
python | huggingface__transformers | src/transformers/models/starcoder2/modeling_starcoder2.py | {
"start": 2500,
"end": 6662
} | class ____(nn.Module):
def __init__(self, config: Starcoder2Config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
self.act = ACT2FN[config.hidden_act]
self.residual_dropout = config.residual_dropout
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
return hidden_states
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Starcoder2MLP |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 340408,
"end": 354205
} | class ____(FieldChannelMixin, core.LatLongFieldDef):
r"""
Longitude schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : Literal['quantitative']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Longitude: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> Longitude: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> Longitude: ...
@overload
def bandPosition(self, _: float, /) -> Longitude: ...
@overload
def bin(self, _: None, /) -> Longitude: ...
@overload
def field(self, _: str | RepeatRef, /) -> Longitude: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Longitude: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Longitude: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Longitude: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Longitude: ...
@overload
def type(self, _: Literal["quantitative"], /) -> Longitude: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[Literal["quantitative"]] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| Longitude |
python | realpython__materials | python-self-type/accounts_typevar.py | {
"start": 207,
"end": 806
} | class ____:
account_number: int
balance: float
def display_balance(self: TBankAccount) -> TBankAccount:
print(f"Account Number: {self.account_number}")
print(f"Balance: ${self.balance:,.2f}\n")
return self
def deposit(self: TBankAccount, amount: float) -> TBankAccount:
self.balance += amount
return self
def withdraw(self: TBankAccount, amount: float) -> TBankAccount:
if self.balance >= amount:
self.balance -= amount
else:
print("Insufficient balance")
return self
@dataclass
| BankAccount |
python | apache__thrift | test/crossrunner/test.py | {
"start": 3454,
"end": 5464
} | class ____(object):
def __init__(self, testdir, server, client, delay, timeout, **kwargs):
self.testdir = testdir
self._log = multiprocessing.get_logger()
self._config = kwargs
self.protocol = kwargs['protocol']
self.transport = kwargs['transport']
self.socket = kwargs['socket']
srv_dict = self._fix_workdir(merge_dict(self._config, server))
cli_dict = self._fix_workdir(merge_dict(self._config, client))
cli_dict['extra_args2'] = srv_dict.pop('remote_args', [])
srv_dict['extra_args2'] = cli_dict.pop('remote_args', [])
self.server = TestProgram('server', **srv_dict)
self.client = TestProgram('client', **cli_dict)
self.delay = delay
self.timeout = timeout
self._name = None
# results
self.success = None
self.as_expected = None
self.returncode = None
self.expired = False
self.retry_count = 0
def _fix_workdir(self, config):
key = 'workdir'
path = config.get(key, None)
if not path:
path = self.testdir
if os.path.isabs(path):
path = os.path.realpath(path)
else:
path = os.path.realpath(os.path.join(self.testdir, path))
config.update({key: path})
return config
@classmethod
def get_name(cls, server, client, protocol, transport, socket, *args, **kwargs):
return '%s-%s_%s_%s-%s' % (server, client, protocol, transport, socket)
@property
def name(self):
if not self._name:
self._name = self.get_name(
self.server.name, self.client.name, self.protocol, self.transport, self.socket)
return self._name
@property
def transport_name(self):
return '%s-%s' % (self.transport, self.socket)
def test_name(server, client, protocol, transport, socket, **kwargs):
return TestEntry.get_name(server['name'], client['name'], protocol, transport, socket)
| TestEntry |
python | doocs__leetcode | solution/3000-3099/3005.Count Elements With Maximum Frequency/Solution.py | {
"start": 0,
"end": 190
} | class ____:
def maxFrequencyElements(self, nums: List[int]) -> int:
cnt = Counter(nums)
mx = max(cnt.values())
return sum(x for x in cnt.values() if x == mx)
| Solution |
python | scikit-image__scikit-image | tests/skimage/measure/test_ccomp.py | {
"start": 3262,
"end": 7397
} | class ____:
def setup_method(self):
self.x = np.zeros((3, 4, 5), int)
self.x[0] = np.array(
[[0, 3, 2, 1, 9], [0, 1, 9, 2, 9], [0, 1, 9, 9, 9], [3, 1, 5, 3, 0]]
)
self.x[1] = np.array(
[[3, 3, 2, 1, 9], [0, 3, 9, 2, 1], [0, 3, 3, 1, 1], [3, 1, 3, 3, 0]]
)
self.x[2] = np.array(
[[3, 3, 8, 8, 0], [2, 3, 9, 8, 8], [2, 3, 0, 8, 0], [2, 1, 0, 0, 0]]
)
self.labels = np.zeros((3, 4, 5), int)
self.labels[0] = np.array(
[[0, 1, 2, 3, 4], [0, 5, 4, 2, 4], [0, 5, 4, 4, 4], [1, 5, 6, 1, 0]]
)
self.labels[1] = np.array(
[[1, 1, 2, 3, 4], [0, 1, 4, 2, 3], [0, 1, 1, 3, 3], [1, 5, 1, 1, 0]]
)
self.labels[2] = np.array(
[[1, 1, 7, 7, 0], [8, 1, 4, 7, 7], [8, 1, 0, 7, 0], [8, 5, 0, 0, 0]]
)
def test_basic(self):
labels = label(self.x)
assert_array_equal(labels, self.labels)
assert self.x[0, 0, 2] == 2, "Data was modified!"
def test_random(self):
x = (np.random.rand(20, 30) * 5).astype(int)
labels = label(x)
n = labels.max()
for i in range(n):
values = x[labels == i]
assert np.all(values == values[0])
def test_diag(self):
x = np.zeros((3, 3, 3), int)
x[0, 2, 2] = 1
x[1, 1, 1] = 1
x[2, 0, 0] = 1
assert_array_equal(label(x), x)
def test_4_vs_8(self):
x = np.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label4 = x.copy()
label4[1, 0, 0] = 2
assert_array_equal(label(x, connectivity=1), label4)
assert_array_equal(label(x, connectivity=3), x)
def test_connectivity_1_vs_2(self):
x = np.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label1 = x.copy()
label1[1, 0, 0] = 2
assert_array_equal(label(x, connectivity=1), label1)
assert_array_equal(label(x, connectivity=3), x)
def test_background(self):
x = np.zeros((2, 3, 3), int)
x[0] = np.array([[1, 0, 0], [1, 0, 0], [0, 0, 0]])
x[1] = np.array([[0, 0, 0], [0, 1, 5], [0, 0, 0]])
lnb = x.copy()
lnb[0] = np.array([[1, 2, 2], [1, 2, 2], [2, 2, 2]])
lnb[1] = np.array([[2, 2, 2], [2, 1, 3], [2, 2, 2]])
lb = x.copy()
lb[0] = np.array([[1, BG, BG], [1, BG, BG], [BG, BG, BG]])
lb[1] = np.array([[BG, BG, BG], [BG, 1, 2], [BG, BG, BG]])
assert_array_equal(label(x), lb)
assert_array_equal(label(x, background=-1), lnb)
def test_background_two_regions(self):
x = np.zeros((2, 3, 3), int)
x[0] = np.array([[0, 0, 6], [0, 0, 6], [5, 5, 5]])
x[1] = np.array([[6, 6, 0], [5, 0, 0], [0, 0, 0]])
lb = x.copy()
lb[0] = np.array([[BG, BG, 1], [BG, BG, 1], [2, 2, 2]])
lb[1] = np.array([[1, 1, BG], [2, BG, BG], [BG, BG, BG]])
res = label(x, background=0)
assert_array_equal(res, lb)
def test_background_one_region_center(self):
x = np.zeros((3, 3, 3), int)
x[1, 1, 1] = 1
lb = np.ones_like(x) * BG
lb[1, 1, 1] = 1
assert_array_equal(label(x, connectivity=1, background=0), lb)
def test_return_num(self):
x = np.array([[1, 0, 6], [0, 0, 6], [5, 5, 5]])
assert_array_equal(label(x, return_num=True)[1], 3)
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
def test_1D(self):
x = np.array((0, 1, 2, 2, 1, 1, 0, 0))
xlen = len(x)
y = np.array((0, 1, 2, 2, 3, 3, 0, 0))
reshapes = (
(xlen,),
(1, xlen),
(xlen, 1),
(1, xlen, 1),
(xlen, 1, 1),
(1, 1, xlen),
)
for reshape in reshapes:
x2 = x.reshape(reshape)
labelled = label(x2)
assert_array_equal(y, labelled.flatten())
def test_nd(self):
x = np.ones((1, 2, 3, 4))
with testing.raises(NotImplementedError):
label(x)
| TestConnectedComponents3d |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_tasks.py | {
"start": 3480,
"end": 11753
} | class ____(TestTaskEndpoint):
def test_should_respond_200(self, test_client):
expected = {
"class_ref": {
"class_name": "EmptyOperator",
"module_path": "airflow.providers.standard.operators.empty",
},
"depends_on_past": False,
"downstream_task_ids": [self.task_id2],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"operator_name": "EmptyOperator",
"owner": "airflow",
"params": {"foo": {"value": "bar", "schema": {}, "description": None, "source": "task"}},
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": 0,
"start_date": "2020-06-15T00:00:00Z",
"task_id": "op1",
"task_display_name": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
"is_mapped": False,
"doc_md": None,
}
response = test_client.get(
f"{self.api_prefix}/{self.dag_id}/tasks/{self.task_id}",
)
assert response.status_code == 200
assert response.json() == expected
def test_mapped_task(self, test_client):
expected = {
"class_ref": {
"class_name": "EmptyOperator",
"module_path": "airflow.providers.standard.operators.empty",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"is_mapped": True,
"operator_name": "EmptyOperator",
"owner": "airflow",
"params": {},
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "microseconds": 0, "seconds": 300},
"retry_exponential_backoff": 0,
"start_date": "2020-06-15T00:00:00Z",
"task_id": "mapped_task",
"task_display_name": "mapped_task",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
"doc_md": None,
}
response = test_client.get(
f"{self.api_prefix}/{self.mapped_dag_id}/tasks/{self.mapped_task_id}",
)
assert response.status_code == 200
assert response.json() == expected
def test_unscheduled_task(self, test_client):
expected = {
"class_ref": {
"class_name": "EmptyOperator",
"module_path": "airflow.providers.standard.operators.empty",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"operator_name": "EmptyOperator",
"owner": "airflow",
"params": {
"is_unscheduled": {
"value": True,
"schema": {},
"description": None,
"source": "task",
}
},
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": 0,
"start_date": None,
"task_id": None,
"task_display_name": None,
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
"is_mapped": False,
"doc_md": None,
}
downstream_dict = {
self.unscheduled_task_id1: self.unscheduled_task_id2,
self.unscheduled_task_id2: None,
}
for task_id, downstream_task_id in downstream_dict.items():
response = test_client.get(
f"{self.api_prefix}/{self.unscheduled_dag_id}/tasks/{task_id}",
)
assert response.status_code == 200
expected["downstream_task_ids"] = [downstream_task_id] if downstream_task_id else []
expected["task_id"] = task_id
expected["task_display_name"] = task_id
assert response.json() == expected
def test_should_respond_200_serialized(self, test_client, testing_dag_bundle):
# Get the dag out of the dagbag before we patch it to an empty one
with DAG(self.dag_id, schedule=None, start_date=self.task1_start_date, doc_md="details") as dag:
task1 = EmptyOperator(task_id=self.task_id, params={"foo": "bar"})
task2 = EmptyOperator(task_id=self.task_id2, start_date=self.task2_start_date)
task1 >> task2
sync_dag_to_db(dag)
dag_bag = DBDagBag()
test_client.app.dependency_overrides[dag_bag_from_app] = lambda: dag_bag
expected = {
"class_ref": {
"class_name": "EmptyOperator",
"module_path": "airflow.providers.standard.operators.empty",
},
"depends_on_past": False,
"downstream_task_ids": [self.task_id2],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"operator_name": "EmptyOperator",
"owner": "airflow",
"params": {
"foo": {
"value": "bar",
"schema": {},
"description": None,
"source": "task",
}
},
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": 0,
"start_date": "2020-06-15T00:00:00Z",
"task_id": "op1",
"task_display_name": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
"is_mapped": False,
"doc_md": None,
}
response = test_client.get(
f"{self.api_prefix}/{self.dag_id}/tasks/{self.task_id}",
)
assert response.status_code == 200
assert response.json() == expected
def test_should_respond_404(self, test_client):
task_id = "xxxx_not_existing"
response = test_client.get(
f"{self.api_prefix}/{self.dag_id}/tasks/{task_id}",
)
assert response.status_code == 404
def test_should_respond_404_when_dag_not_found(self, test_client):
dag_id = "xxxx_not_existing"
response = test_client.get(
f"{self.api_prefix}/{dag_id}/tasks/{self.task_id}",
)
assert response.status_code == 404
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(f"{self.api_prefix}/{self.dag_id}/tasks/{self.task_id}")
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(f"{self.api_prefix}/{self.dag_id}/tasks/{self.task_id}")
assert response.status_code == 403
| TestGetTask |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 1555,
"end": 1952
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a new state."""
type: StateType
name: Optional[str] = Field(default=None)
message: Optional[str] = Field(default=None, examples=["Run started"])
state_details: StateDetails = Field(default_factory=StateDetails)
data: Union[ResultRecordMetadata, Any] = Field(
default=None,
)
| StateCreate |
python | modin-project__modin | modin/config/envvars.py | {
"start": 40156,
"end": 40347
} | class ____(EnvironmentVariable, type=str):
"""Engine to run `read_sql`."""
varname = "MODIN_READ_SQL_ENGINE"
default = "Pandas"
choices = ("Pandas", "Connectorx")
| ReadSqlEngine |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/image_ops/attention_ops_test.py | {
"start": 1079,
"end": 11759
} | class ____(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.
Args:
tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
offsets: Relative location of the center of the glimpse in the input
image expressed as [row_offset, col_offset].
expected_rows: A list containing the expected row numbers (None for
out of bound entries that are expected to be replaced by uniform
random entries in [0,1) ).
expected_cols: Same as expected_rows, but for column numbers.
"""
rows = tensor_in_sizes[0]
cols = tensor_in_sizes[1]
# Row Tensor with entries by row.
# [[ 1 1 1 ... ]
# [ 2 2 2 ... ]
# [ 3 3 3 ... ]
# [ ...
# ]
t_rows = array_ops.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_rows_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
[0, 2, 1, 3])
# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
t_cols = array_ops.tile(
[[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_cols_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
[0, 2, 1, 3])
# extract_glimpses from Row and Column Tensor, respectively.
# Switch order for glimpse_sizes and offsets to switch from (row, col)
# convention to tensorflows (height, width) convention.
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (array_ops.transpose(
image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (array_ops.transpose(
image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
# Evaluate the TensorFlow Graph.
with self.cached_session() as sess:
value_rows, value_cols = self.evaluate([glimpse_rows, glimpse_cols])
# Check dimensions of returned glimpse.
self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
# Check entries.
min_random_val = 0
max_random_val = max(rows, cols)
for i in range(glimpse_sizes[0]):
for j in range(glimpse_sizes[1]):
if expected_rows[i] is None or expected_cols[j] is None:
self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
else:
self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
def testCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])
def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.cached_session():
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros((0, 1, 1, 0), dtype=np.float32), self.evaluate(result))
def testLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))
def testTooLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])
def testGlimpseFullOverlap(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])
def testGlimpseFullOverlap2(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])
def testGlimpseBeforeLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])
def testGlimpseLowerRightCorner(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])
def testGlimpseNoOverlap(self):
self._VerifyValues(
tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])
def testGlimpseOnLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])
def testGlimpseUpperMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])
def testGlimpseNoiseZeroV1Compatible(self):
# Note: The old versions of extract_glimpse was incorrect in implementation.
# This test is for compatibility so that graph save in old versions behave
# the same. Notice the API uses gen_image_ops.extract_glimpse() on purpose.
#
# Image:
# [ 0. 1. 2. 3. 4.]
# [ 5. 6. 7. 8. 9.]
# [ 10. 11. 12. 13. 14.]
# [ 15. 16. 17. 18. 19.]
# [ 20. 21. 22. 23. 24.]
img = constant_op.constant(
np.arange(25).reshape((1, 5, 5, 1)), dtype=dtypes.float32)
with self.test_session():
# Result 1:
# [ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]
result1 = gen_image_ops.extract_glimpse(
img, [3, 3], [[-2, 2]],
centered=False,
normalized=False,
noise='zero',
uniform_noise=False)
self.assertAllEqual(
np.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
self.evaluate(result1)[0, :, :, 0])
# Result 2:
# [ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 1. 2. 3. 4. 0.]
# [ 0. 5. 6. 7. 8. 9. 0.]
# [ 0. 10. 11. 12. 13. 14. 0.]
# [ 0. 15. 16. 17. 18. 19. 0.]
# [ 0. 20. 21. 22. 23. 24. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]
result2 = gen_image_ops.extract_glimpse(
img, [7, 7], [[0, 0]],
normalized=False,
noise='zero',
uniform_noise=False)
self.assertAllEqual(
np.asarray([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 4, 0],
[0, 5, 6, 7, 8, 9, 0], [0, 10, 11, 12, 13, 14, 0],
[0, 15, 16, 17, 18, 19, 0], [0, 20, 21, 22, 23, 24, 0],
[0, 0, 0, 0, 0, 0, 0]]),
self.evaluate(result2)[0, :, :, 0])
def testGlimpseNoiseZero(self):
# Image:
# [ 0. 1. 2. 3. 4.]
# [ 5. 6. 7. 8. 9.]
# [ 10. 11. 12. 13. 14.]
# [ 15. 16. 17. 18. 19.]
# [ 20. 21. 22. 23. 24.]
img = constant_op.constant(
np.arange(25).reshape((1, 5, 5, 1)), dtype=dtypes.float32)
with self.test_session():
# Result 1:
# [ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]
result1 = image_ops.extract_glimpse_v2(
img, [3, 3], [[-2, -2]],
centered=False,
normalized=False,
noise='zero')
self.assertAllEqual(
np.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
self.evaluate(result1)[0, :, :, 0])
# Result 2:
# [ 12. 13. 14. 0. 0. 0. 0.]
# [ 17. 18. 19. 0. 0. 0. 0.]
# [ 22. 23. 24. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]
result2 = image_ops.extract_glimpse_v2(
img, [7, 7], [[0, 0]], normalized=False, noise='zero')
self.assertAllEqual(
np.asarray([[12, 13, 14, 0, 0, 0, 0], [17, 18, 19, 0, 0, 0, 0],
[22, 23, 24, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]),
self.evaluate(result2)[0, :, :, 0])
def testGlimpseNonNormalizedNonCentered(self):
img = constant_op.constant(
np.arange(25).reshape((1, 5, 5, 1)), dtype=dtypes.float32)
with self.test_session():
result1 = image_ops.extract_glimpse_v2(
img, [3, 3], [[0, 0]], centered=False, normalized=False)
result2 = image_ops.extract_glimpse_v2(
img, [3, 3], [[1, 0]], centered=False, normalized=False)
self.assertAllEqual(
np.asarray([[0, 1, 2], [5, 6, 7], [10, 11, 12]]),
self.evaluate(result1)[0, :, :, 0])
self.assertAllEqual(
np.asarray([[5, 6, 7], [10, 11, 12], [15, 16, 17]]),
self.evaluate(result2)[0, :, :, 0])
def testGlimpseNegativeInput(self):
img = np.arange(9).reshape([1, 3, 3, 1])
with self.test_session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
result = image_ops.extract_glimpse_v2(
img,
size=[1023, -63],
offsets=[1023, 63],
centered=False,
normalized=False)
self.evaluate(result)
if __name__ == '__main__':
test.main()
| ExtractGlimpseTest |
python | google__pytype | pytype/tools/config_test.py | {
"start": 1950,
"end": 3191
} | class ____(unittest.TestCase):
def test_items(self):
with test_utils.Tempdir() as d:
f = d.create_file(
'setup.cfg',
textwrap.dedent("""
[test]
k1 = v1
k2 = v2
"""),
)
section = config.IniConfigSection.create_from_file(f, 'test')
self.assertSequenceEqual(section.items(), [('k1', 'v1'), ('k2', 'v2')])
def test_empty(self):
with test_utils.Tempdir() as d:
f = d.create_file(
'setup.cfg',
textwrap.dedent("""
[test]
k =
"""),
)
section = config.IniConfigSection.create_from_file(f, 'test')
self.assertSequenceEqual(section.items(), [('k', '')])
def test_no_file(self):
self.assertIsNone(
config.IniConfigSection.create_from_file('/does/not/exist.cfg', 'test')
)
def test_malformed_file(self):
with test_utils.Tempdir() as d:
f = d.create_file('setup.cfg', 'rainbow = unicorns')
self.assertIsNone(config.IniConfigSection.create_from_file(f, 'test'))
def test_missing_section(self):
with test_utils.Tempdir() as d:
f = d.create_file('setup.cfg')
self.assertIsNone(config.IniConfigSection.create_from_file(f, 'test'))
| TestIniConfigSection |
python | instagram__MonkeyType | tests/test_stubs.py | {
"start": 48918,
"end": 48964
} | class ____:
class Child:
pass
| Parent |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/final2.py | {
"start": 2984,
"end": 3054
} | class ____:
@final
def __init__(self, v: int) -> None: ...
| Base5 |
python | rapidsai__cudf | python/cudf/cudf/core/index.py | {
"start": 162083,
"end": 174748
} | class ____(Index):
"""
A categorical of orderable values that represent the indices of another
Column
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If categories are given,
values not in categories will be replaced with None/NaN.
categories : list-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in dtype),
they will be inferred from the data.
ordered : bool, optional
Whether or not this categorical is treated as an ordered categorical.
If not given here or in dtype, the resulting categorical will be
unordered.
dtype : CategoricalDtype or "category", optional
If CategoricalDtype, cannot be used together with categories or
ordered.
copy : bool, default False
Make a copy of input.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
Methods
-------
equals
Returns
-------
CategoricalIndex
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> cudf.CategoricalIndex(
... data=[1, 2, 3, 4], categories=[1, 2], ordered=False, name="a")
CategoricalIndex([1, 2, <NA>, <NA>], categories=[1, 2], ordered=False, dtype='category', name='a')
>>> cudf.CategoricalIndex(
... data=[1, 2, 3, 4], dtype=pd.CategoricalDtype([1, 2, 3]), name="a")
CategoricalIndex([1, 2, 3, <NA>], categories=[1, 2, 3], ordered=False, dtype='category', name='a')
"""
@_performance_tracking
def __init__(
self,
data=None,
categories=None,
ordered=None,
dtype=None,
copy=False,
name=None,
nan_as_null=no_default,
):
if isinstance(dtype, (pd.CategoricalDtype, cudf.CategoricalDtype)):
if categories is not None or ordered is not None:
raise ValueError(
"Cannot specify `categories` or "
"`ordered` together with `dtype`."
)
if copy:
data = as_column(data, dtype=dtype).copy(deep=True)
name = _getdefault_name(data, name=name)
if isinstance(data, CategoricalColumn):
data = data
elif isinstance(getattr(data, "dtype", None), pd.CategoricalDtype):
data = as_column(data)
elif isinstance(data, (cudf.Series, Index)) and isinstance(
data.dtype, cudf.CategoricalDtype
):
data = data._column
else:
if dtype is None or (
isinstance(dtype, str) and dtype == "category"
):
dtype = cudf.CategoricalDtype()
data = as_column(data, dtype=dtype)
# dtype has already been taken care
dtype = None
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif isinstance(dtype, (pd.CategoricalDtype, cudf.CategoricalDtype)):
data = data.set_categories(dtype.categories, ordered=ordered)
elif ordered is True and data.ordered is False:
data = data.as_ordered(ordered=True)
elif ordered is False and data.ordered is True:
data = data.as_ordered(ordered=False)
SingleColumnFrame.__init__(
self, ColumnAccessor({name: data}, verify=False)
)
@classmethod
@_performance_tracking
def _from_column(
cls, column: ColumnBase, *, name: Hashable = None, freq: Any = None
) -> Self:
if not isinstance(column.dtype, cudf.CategoricalDtype):
raise ValueError("column must have a categorial type.")
return super()._from_column(column, name=name)
@classmethod
def from_codes(
cls,
codes: ColumnLike,
categories: ColumnLike,
ordered: bool,
name: Hashable = None,
) -> Self:
"""
Construct a CategoricalIndex from codes and categories.
More performant that using the CategoricalIndex constructor.
Parameters
----------
codes : array-like
The integer codes of the CategoricalIndex.
categories : array-like
The category labels of the CategoricalIndex.
ordered : bool
Whether the categories are ordered.
name : Hashable, optional
The name of the CategoricalIndex.
"""
codes = as_column(codes, dtype=np.dtype(np.int32))
categories = as_column(categories)
cat_col = codes._with_type_metadata(
cudf.CategoricalDtype(categories=categories, ordered=ordered)
)
return cls._from_column(cat_col, name=name)
    @property
    def ordered(self) -> bool:
        """Whether the categories have an ordering (delegated to the column)."""
        return self._column.ordered
    @cached_property
    def _constructor(self):
        """Class used internally to construct results of index operations."""
        return CategoricalIndex
    @cached_property
    def inferred_type(self) -> str:
        """String identifying the inferred type of the index values."""
        return "categorical"
    @property
    @_performance_tracking
    def codes(self) -> Index:
        """
        The category codes of this categorical.

        Returns
        -------
        Index
            Integer codes, one per element, indexing into ``categories``.
        """
        return Index._from_column(self._column.codes)
    @property
    @_performance_tracking
    def categories(self) -> Index:
        """
        The categories of this categorical.

        Returns
        -------
        Index
            The category labels, taken from this index's dtype.
        """
        return self.dtype.categories
    def _is_boolean(self) -> bool:
        """Categorical indexes are never treated as boolean."""
        return False
    def _is_categorical(self) -> bool:
        """Mark this index as categorical for internal type dispatch."""
        return True
def add_categories(self, new_categories) -> Self:
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
"""
return type(self)._from_column(
self._column.add_categories(new_categories), name=self.name
)
def as_ordered(self) -> Self:
"""
Set the Categorical to be ordered.
"""
return type(self)._from_column(
self._column.as_ordered(ordered=True), name=self.name
)
def as_unordered(self) -> Self:
"""
Set the Categorical to be unordered.
"""
return type(self)._from_column(
self._column.as_ordered(ordered=False), name=self.name
)
def remove_categories(self, removals) -> Self:
"""
Remove the specified categories.
`removals` must be included in the old categories.
Parameters
----------
removals : category or list of categories
The categories which should be removed.
"""
return type(self)._from_column(
self._column.remove_categories(removals), name=self.name
)
    def remove_unused_categories(self) -> Self:
        """
        Remove categories which are not used.

        NOTE(review): a previous docstring claimed this method is "currently
        not supported", but it visibly delegates to the column implementation
        below -- confirm actual support status before relying on it.
        """
        return type(self)._from_column(
            self._column.remove_unused_categories(), name=self.name
        )
    def rename_categories(self, new_categories) -> Self:
        """
        Rename categories.

        NOTE(review): a previous docstring claimed this method is "currently
        not supported", but it visibly delegates to the column implementation
        below -- confirm actual support status before relying on it.
        """
        return type(self)._from_column(
            self._column.rename_categories(new_categories), name=self.name
        )
def reorder_categories(self, new_categories, ordered=None) -> Self:
"""
Reorder categories as specified in new_categories.
``new_categories`` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
"""
return type(self)._from_column(
self._column.reorder_categories(new_categories, ordered=ordered),
name=self.name,
)
def set_categories(
self, new_categories, ordered=None, rename: bool = False
) -> Self:
"""
Set the categories to the specified new_categories.
Parameters
----------
new_categories : list-like
The categories in new order.
ordered : bool, default None
Whether or not the categorical is treated as
a ordered categorical. If not given, do
not change the ordered information.
rename : bool, default False
Whether or not the `new_categories` should be
considered as a rename of the old categories
or as reordered categories.
"""
return type(self)._from_column(
self._column.set_categories(
new_categories, ordered=ordered, rename=rename
),
name=self.name,
)
@_performance_tracking
def interval_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    name=None,
    closed="right",
) -> IntervalIndex:
    """
    Returns a fixed frequency IntervalIndex.
    Parameters
    ----------
    start : numeric, default None
        Left bound for generating intervals.
    end : numeric , default None
        Right bound for generating intervals.
    periods : int, default None
        Number of periods to generate
    freq : numeric, default None
        The length of each interval. Must be consistent
        with the type of start and end
    name : str, default None
        Name of the resulting IntervalIndex.
    closed : {"left", "right", "both", "neither"}, default "right"
        Whether the intervals are closed on the left-side, right-side,
        both or neither.
    Returns
    -------
    IntervalIndex
    Examples
    --------
    >>> import cudf
    >>> import pandas as pd
    >>> cudf.interval_range(start=0,end=5)
    IntervalIndex([(0, 0], (1, 1], (2, 2], (3, 3], (4, 4], (5, 5]],
    ...closed='right',dtype='interval')
    >>> cudf.interval_range(start=0,end=10, freq=2,closed='left')
    IntervalIndex([[0, 2), [2, 4), [4, 6), [6, 8), [8, 10)],
    ...closed='left',dtype='interval')
    >>> cudf.interval_range(start=0,end=10, periods=3,closed='left')
    ...IntervalIndex([[0.0, 3.3333333333333335),
    [3.3333333333333335, 6.666666666666667),
    [6.666666666666667, 10.0)],
    closed='left',
    dtype='interval')
    """
    nargs = sum(_ is not None for _ in (start, end, periods, freq))
    # we need at least three of (start, end, periods, freq)
    if nargs == 2 and freq is None:
        # Default the interval length to 1 when only two bounds were given.
        freq = 1
        nargs += 1
    if nargs != 3:
        raise ValueError(
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )
    if periods is not None and not is_integer(periods):
        warnings.warn(
            "Non-integer 'periods' in cudf.date_range, and cudf.interval_range"
            " are deprecated and will raise in a future version.",
            FutureWarning,
        )
    # Derive the single missing parameter from the three that were given.
    if start is None:
        start = end - freq * periods
    elif freq is None:
        # Keep freq integral when the span divides evenly; otherwise fall
        # back to a float step.
        quotient, remainder = divmod(end - start, periods)
        if remainder:
            freq = (end - start) / periods
        else:
            freq = int(quotient)
    elif periods is None:
        periods = int((end - start) / freq)
    elif end is None:
        end = start + periods * freq
    pa_start = pa.scalar(start)
    pa_end = pa.scalar(end)
    pa_freq = pa.scalar(freq)
    # Intervals are only defined over numeric, non-decimal endpoints.
    if any(
        not is_dtype_obj_numeric(
            cudf_dtype_from_pa_type(x.type), include_decimal=False
        )
        for x in (pa_start, pa.scalar(periods), pa_freq, pa_end)
    ):
        raise ValueError("start, end, periods, freq must be numeric values.")
    common_dtype = find_common_type(
        (
            cudf_dtype_from_pa_type(pa_start.type),
            cudf_dtype_from_pa_type(pa_freq.type),
            cudf_dtype_from_pa_type(pa_end.type),
        )
    )
    pa_start = pa_start.cast(cudf_dtype_to_pa_type(common_dtype))
    pa_freq = pa_freq.cast(cudf_dtype_to_pa_type(common_dtype))
    # Materialize the periods+1 breakpoints start, start+freq, ... on device.
    with acquire_spill_lock():
        bin_edges = ColumnBase.from_pylibcudf(
            plc.filling.sequence(
                size=periods + 1,
                init=pa_scalar_to_plc_scalar(pa_start),
                step=pa_scalar_to_plc_scalar(pa_freq),
            )
        )
    return IntervalIndex.from_breaks(
        bin_edges.astype(common_dtype), closed=closed, name=name
    )
| CategoricalIndex |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/datatree.py | {
"start": 3406,
"end": 13173
} | class ____:
"""Represents a transition to a finished state."""
status: Status
interesting_origin: InterestingOrigin | None
def _repr_pretty_(self, p: "RepresentationPrinter", cycle: bool) -> None:
assert cycle is False
o = self.interesting_origin
# avoid str(o), which can include multiple lines of context
origin = (
"" if o is None else f", {o.exc_type.__name__} at {o.filename}:{o.lineno}"
)
p.text(f"Conclusion ({self.status!r}{origin})")
# The number of max children where, beyond this, it is practically impossible
# for hypothesis to saturate / explore all children nodes in a reasonable time
# frame. We use this to bail out of expensive max children computations early,
# where the numbers involved are so large that we know they will be larger than
# this number.
#
# Note that it's ok for us to underestimate the number of max children of a node
# by using this. We just may think the node is exhausted when in fact it has more
# possible children to be explored. This has the potential to finish generation
# early due to exhausting the entire tree, but that is quite unlikely: (1) the
# number of examples would have to be quite high, and (2) the tree would have to
# contain only one or two nodes, or generate_novel_prefix would simply switch to
# exploring another non-exhausted node.
#
# Also note that we may sometimes compute max children above this value. In other
# words, this is *not* a hard maximum on the computed max children. It's the point
# where further computation is not beneficial - but sometimes doing that computation
# unconditionally is cheaper than estimating against this value.
#
# The one case where this may be detrimental is fuzzing, where the throughput of
# examples is so high that it really may saturate important nodes. We'll cross
# that bridge when we come to it.
MAX_CHILDREN_EFFECTIVELY_INFINITE: Final[int] = 10_000_000


def _count_distinct_strings(*, alphabet_size: int, min_size: int, max_size: int) -> int:
    """Count the distinct strings with lengths in [min_size, max_size] over an
    alphabet of ``alphabet_size`` symbols, capped at
    MAX_CHILDREN_EFFECTIVELY_INFINITE once that bound is provably exceeded.
    """
    if alphabet_size == 0:
        # Only the empty string exists (and math.log(0) would raise below).
        return 1
    if alphabet_size == 1:
        # Exactly one string per permitted length; the geometric-series
        # formula below would divide by zero for a == 1.
        return max_size - min_size + 1
    # Cheap overflow guard before any expensive pow: the count is at least
    # a**max_size, and  a**m > N  <=>  m * log(a) > log(N).
    if max_size * math.log(alphabet_size) > math.log(
        MAX_CHILDREN_EFFECTIVELY_INFINITE
    ):
        return MAX_CHILDREN_EFFECTIVELY_INFINITE
    # Geometric series, summed from min_size to max_size:
    #   sum_{k=n..m} a**k = (S(m) - S(n-1)) / S(0)  with  S(n) = a**(n+1) - 1.
    def partial_sum(n: int) -> int:
        return alphabet_size ** (n + 1) - 1

    return (partial_sum(max_size) - partial_sum(min_size - 1)) // partial_sum(0)
def compute_max_children(
    choice_type: ChoiceTypeT, constraints: ChoiceConstraintsT
) -> int:
    """Return the number of distinct values a choice node with these
    constraints can take (string/bytes counts may be capped; see
    _count_distinct_strings)."""
    if choice_type == "integer":
        constraints = cast(IntegerConstraints, constraints)
        min_value = constraints["min_value"]
        max_value = constraints["max_value"]
        if min_value is None and max_value is None:
            # full 128 bit range.
            return 2**128 - 1
        if min_value is not None and max_value is not None:
            # count between min/max value.
            return max_value - min_value + 1
        # hard case: only one bound was specified. Here we probe either upwards
        # or downwards with our full 128 bit generation, but only half of these
        # (plus one for the case of generating zero) result in a probe in the
        # direction we want. ((2**128 - 1) // 2) + 1 == 2 ** 127
        assert (min_value is None) != (max_value is None)
        return 2**127
    elif choice_type == "boolean":
        constraints = cast(BooleanConstraints, constraints)
        p = constraints["p"]
        # probabilities of 0 or 1 (or effectively 0 or 1) only have one choice.
        if p <= 2 ** (-64) or p >= (1 - 2 ** (-64)):
            return 1
        return 2
    elif choice_type == "bytes":
        constraints = cast(BytesConstraints, constraints)
        return _count_distinct_strings(
            alphabet_size=2**8,
            min_size=constraints["min_size"],
            max_size=constraints["max_size"],
        )
    elif choice_type == "string":
        constraints = cast(StringConstraints, constraints)
        min_size = constraints["min_size"]
        max_size = constraints["max_size"]
        intervals = constraints["intervals"]
        return _count_distinct_strings(
            alphabet_size=len(intervals), min_size=min_size, max_size=max_size
        )
    elif choice_type == "float":
        constraints = cast(FloatConstraints, constraints)
        min_value_f = constraints["min_value"]
        max_value_f = constraints["max_value"]
        smallest_nonzero_magnitude = constraints["smallest_nonzero_magnitude"]
        count = count_between_floats(min_value_f, max_value_f)
        # we have two intervals:
        # a. [min_value, max_value]
        # b. [-smallest_nonzero_magnitude, smallest_nonzero_magnitude]
        #
        # which could be subsets (in either order), overlapping, or disjoint. We
        # want the interval difference a - b.
        # next_down because endpoints are ok with smallest_nonzero_magnitude
        min_point = max(min_value_f, -flt.next_down(smallest_nonzero_magnitude))
        max_point = min(max_value_f, flt.next_down(smallest_nonzero_magnitude))
        if min_point > max_point:
            # case: disjoint intervals.
            return count
        # Subtract the excluded sub-normal band, then add back the signed
        # zeros, which are representable despite lying inside that band.
        count -= count_between_floats(min_point, max_point)
        if sign_aware_lte(min_value_f, -0.0) and sign_aware_lte(-0.0, max_value_f):
            # account for -0.0
            count += 1
        if sign_aware_lte(min_value_f, 0.0) and sign_aware_lte(0.0, max_value_f):
            # account for 0.0
            count += 1
        return count
    raise NotImplementedError(f"unhandled choice_type {choice_type}")
# In theory, this is a strict superset of the functionality of compute_max_children;
#
# assert len(all_children(choice_type, constraints)) == compute_max_children(choice_type, constraints)
#
# In practice, we maintain two distinct implementations for efficiency and space
# reasons. If you just need the number of children, it is cheaper to use
# compute_max_children than to reify the list of children (only to immediately
# throw it away).
def _floats_between(a: float, b: float) -> Generator[float, None, None]:
    """Yield every representable float from ``a`` to ``b`` inclusive, walking
    their integer encodings in order."""
    lo = float_to_int(a)
    hi = float_to_int(b)
    yield from map(int_to_float, range(lo, hi + 1))
def all_children(
    choice_type: ChoiceTypeT, constraints: ChoiceConstraintsT
) -> Generator[ChoiceT, None, None]:
    """Yield every distinct choice value admitted by these constraints,
    without duplicates (floats get a dedicated path for this reason)."""
    if choice_type != "float":
        # Non-float orderings are bijective, so enumerating by index is safe.
        for index in range(compute_max_children(choice_type, constraints)):
            yield choice_from_index(index, choice_type, constraints)
    else:
        constraints = cast(FloatConstraints, constraints)
        # the float ordering is not injective (because of resampling
        # out-of-bounds values), so using choice_from_index would result in
        # duplicates. This violates invariants in datatree about being able
        # to draw unique new children using all_children.
        #
        # We instead maintain a separate implementation for floats.
        # TODO_IR write a better (bijective) ordering for floats and remove this!
        min_value = constraints["min_value"]
        max_value = constraints["max_value"]
        smallest_nonzero_magnitude = constraints["smallest_nonzero_magnitude"]
        # handle zeroes separately so smallest_nonzero_magnitude can think of
        # itself as a complete interval (instead of a hole at ±0).
        if sign_aware_lte(min_value, -0.0) and sign_aware_lte(-0.0, max_value):
            yield -0.0
        if sign_aware_lte(min_value, 0.0) and sign_aware_lte(0.0, max_value):
            yield 0.0
        if flt.is_negative(min_value):
            if flt.is_negative(max_value):
                # case: both negative.
                max_point = min(max_value, -smallest_nonzero_magnitude)
                # float_to_int increases as negative magnitude increases, so
                # invert order.
                yield from _floats_between(max_point, min_value)
            else:
                # case: straddles midpoint (which is between -0.0 and 0.0).
                yield from _floats_between(-smallest_nonzero_magnitude, min_value)
                yield from _floats_between(smallest_nonzero_magnitude, max_value)
        else:
            # case: both positive.
            min_point = max(min_value, smallest_nonzero_magnitude)
            yield from _floats_between(min_point, max_value)
@dataclass(slots=True, frozen=False)
| Conclusion |
python | tensorflow__tensorflow | tensorflow/python/trackable/resource.py | {
"start": 3049,
"end": 7784
} | class ____(base.Trackable, metaclass=_ResourceMetaclass):
"""Holds a Tensor which a tf.function can capture.
`CapturableResource`s are discovered by traversing the graph of object
attributes, e.g. during `tf.saved_model.save`. They are excluded from the
scope-based tracking of `TrackableResource`; generally things that require
initialization should inherit from `TrackableResource` instead of
`CapturableResource` directly.
"""
def __init__(self, device=""):
"""Initialize the `CapturableResource`.
Args:
device: A string indicating a required placement for this resource,
e.g. "CPU" if this resource must be created on a CPU device. A blank
device allows the user to place resource creation, so generally this
should be blank unless the resource only makes sense on one device.
"""
self._resource_handle_value = None
self._resource_device = device
self._self_destruction_context = (
context.eager_mode if context.executing_eagerly()
else ops.get_default_graph().as_default)
  @classmethod
  def _resource_type(cls):
    """Returns a string identifying this resource type (the class name)."""
    return cls.__name__
  @property
  def _destruction_context(self):
    """Context-manager factory used when destroying the resource.

    Falls back to a no-op context when the attribute was never set (e.g. on
    instances created without running `__init__`).
    """
    return getattr(self, "_self_destruction_context",
                   # no-op context
                   contextlib.suppress)
  @_destruction_context.setter
  def _destruction_context(self, destruction_context):
    """Overrides the context-manager factory used during destruction."""
    self._self_destruction_context = destruction_context
  def _create_resource(self):
    """A function that creates a resource handle."""
    # Subclasses must override; the base class has no concrete resource.
    raise NotImplementedError("TrackableResource._create_resource not "
                              "implemented.")
  @property
  def _resource_handle(self):
    """The cached resource handle tensor, or None if not yet created."""
    return self._resource_handle_value
  @_resource_handle.setter
  def _resource_handle(self, value):
    """Stores the handle, back-linking handle tensors to this trackable."""
    if isinstance(value, (tensor.Tensor, ops.EagerTensor)):
      # weakref avoids a reference cycle between the tensor and its owner.
      value._parent_trackable = weakref.ref(self)  # pylint: disable=protected-access
    self._resource_handle_value = value
  def _initialize(self):
    """A function that initializes the resource. Optional."""
    # Intentionally a no-op; subclasses override when initialization is needed.
    pass
  def _destroy_resource(self):
    """A function that destroys the resource. Optional."""
    # Intentionally a no-op; subclasses override when cleanup is needed.
    pass
@property
def resource_handle(self):
"""Returns the resource handle associated with this Resource."""
if self._resource_handle is None:
with ops.device(self._resource_device):
self._resource_handle = self._create_resource()
return self._resource_handle
  def _export_to_saved_model_graph(
      self, object_map, tensor_map, **unused_kwargs):
    """For implementing `Trackable`."""
    # Shallow-copy self and give the copy a freshly created resource, so the
    # exported graph captures the new handle rather than the live one.
    new_obj = copy.copy(self)
    # pylint: disable=protected-access
    with ops.device(self._resource_device):
      new_resource = new_obj._create_resource()
    new_obj._resource_handle = new_resource
    # pylint: enable=protected-access
    object_map[self] = new_obj
    tensor_map[self.resource_handle] = new_resource
    return [self.resource_handle]
  def _trackable_children(self, save_type=base.SaveType.CHECKPOINT, **kwargs):
    """Returns trackable children, adding resource-lifecycle tf.functions
    (create/initialize/destroy) when exporting to SavedModel."""
    children = super()._trackable_children(save_type, **kwargs)
    if save_type == "savedmodel":
      @def_function.function(input_signature=[], autograph=False)
      def _creator():
        resource = self._create_resource()
        return resource
      @def_function.function(input_signature=[], autograph=False)
      def _initializer():
        self._initialize()
        return 1  # Dummy return
      @def_function.function(input_signature=[], autograph=False)
      def _destroyer():
        self._destroy_resource()
        return 1  # Dummy return
      children.update({
          "_create_resource": _creator,
          "_initialize": _initializer,
          "_destroy_resource": _destroyer,
      })
    return children
  def __del__(self):
    """Best-effort resource destruction; never raises during interpreter exit."""
    try:
      # Outer race condition: on program exit, the destruction context may be
      # deleted before this __del__ is called. At this point we can safely
      # exit without calling _destroy_resource() and let Python handle things.
      with self._destruction_context():
        # Inner race condition: possible between this and `ScopedTFFunction`
        # whereby if an entire garbage collection chain containing both
        # objects is moved to unreachable during the same garbage collection
        # cycle, the __del__ for `ScopedTFFunction` can be collected before
        # this method is called. In that case, we can't do much but
        # continue.
        self._destroy_resource()
    except Exception:  # pylint: disable=broad-except
      # Silence all error logs that occur when attempting to destroy this
      # resource.
      pass
@tf_export("saved_model.experimental.TrackableResource")
| CapturableResource |
python | tensorflow__tensorflow | tensorflow/python/framework/ops.py | {
"start": 211969,
"end": 214627
} | class ____(contextlib.AbstractContextManager[str]): # pylint: disable=invalid-name
"""Graph-only version of `name_scope_v1`."""
  @property
  def name(self):
    """The effective scope name (`name`, or `default_name` if name was None)."""
    return self._name
  def __init__(self, name, default_name=None, values=None) -> None:
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    if not (default_name is None or isinstance(default_name, str)):
      # A non-string default_name usually means the caller meant `values=`.
      raise TypeError(
          "`default_name` type (%s) is not a string type. You likely meant to "
          "pass this into the `values` kwarg." % type(default_name))
    self._name = default_name if name is None else name
    self._default_name = default_name
    self._values = values
  def __enter__(self) -> str:
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    if self._name is None and self._values is not None:
      # We only raise an error if values is not None (provided) because
      # currently tf.name_scope(None) (values=None then) is sometimes used as
      # an idiom to reset to top scope.
      raise ValueError(
          "At least one of name (%s) and default_name (%s) must be provided."
          % (self._name, self._default_name))
    g = get_default_graph()
    if self._values and not g.building_function:
      # Specialize based on the knowledge that `_get_graph_from_inputs()`
      # ignores `inputs` when building a function.
      g_from_inputs = _get_graph_from_inputs(self._values)
      if g_from_inputs is not g:
        # The inputs live on a different graph: make it the default for the
        # duration of the scope, and remember the manager so __exit__ (or the
        # failure path below) can unwind it.
        g = g_from_inputs
        self._g_manager = g.as_default()
        self._g_manager.__enter__()
      else:
        self._g_manager = None
    else:
      self._g_manager = None
    try:
      self._name_scope = g.name_scope(self._name)
      return self._name_scope.__enter__()
    except:
      # Bare except is deliberate: unwind the graph manager on ANY failure,
      # then re-raise unchanged.
      if self._g_manager is not None:
        self._g_manager.__exit__(*sys.exc_info())
      raise
  def __exit__(self, *exc_info) -> None:
    """Exits the name scope, then any graph default scope entered in __enter__."""
    self._name_scope.__exit__(*exc_info)
    if self._g_manager is not None:
      self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
| internal_name_scope_v1 |
python | pennersr__django-allauth | allauth/socialaccount/providers/spotify/views.py | {
"start": 181,
"end": 894
} | class ____(OAuth2Adapter):
provider_id = "spotify"
access_token_url = "https://accounts.spotify.com/api/token" # nosec
authorize_url = "https://accounts.spotify.com/authorize"
profile_url = "https://api.spotify.com/v1/me"
def complete_login(self, request, app, token, **kwargs):
extra_data = (
get_adapter()
.get_requests_session()
.get(self.profile_url, params={"access_token": token.token})
)
return self.get_provider().sociallogin_from_response(request, extra_data.json())
oauth_login = OAuth2LoginView.adapter_view(SpotifyOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(SpotifyOAuth2Adapter)
| SpotifyOAuth2Adapter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 544418,
"end": 544761
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("PullRequestTimelineItems", graphql_name="node")
| PullRequestTimelineItemsEdge |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.