language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | explosion__spaCy | spacy/lang/sl/__init__.py | {
"start": 505,
"end": 607
} | class ____(Language):
lang = "sl"
Defaults = SlovenianDefaults
__all__ = ["Slovenian"]
| Slovenian |
python | boto__boto3 | boto3/dynamodb/conditions.py | {
"start": 808,
"end": 1814
} | class ____:
expression_format = ''
expression_operator = ''
has_grouped_values = False
def __init__(self, *values):
self._values = values
def __and__(self, other):
if not isinstance(other, ConditionBase):
raise DynamoDBOperationNotSupportedError('AND', other)
return And(self, other)
def __or__(self, other):
if not isinstance(other, ConditionBase):
raise DynamoDBOperationNotSupportedError('OR', other)
return Or(self, other)
def __invert__(self):
return Not(self)
def get_expression(self):
return {
'format': self.expression_format,
'operator': self.expression_operator,
'values': self._values,
}
def __eq__(self, other):
if isinstance(other, type(self)):
if self._values == other._values:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
| ConditionBase |
python | Delgan__loguru | loguru/_better_exceptions.py | {
"start": 3797,
"end": 21581
} | class ____:
_default_theme = frozenset(
{
"introduction": "\x1b[33m\x1b[1m{}\x1b[0m",
"cause": "\x1b[1m{}\x1b[0m",
"context": "\x1b[1m{}\x1b[0m",
"dirname": "\x1b[32m{}\x1b[0m",
"basename": "\x1b[32m\x1b[1m{}\x1b[0m",
"line": "\x1b[33m{}\x1b[0m",
"function": "\x1b[35m{}\x1b[0m",
"exception_type": "\x1b[31m\x1b[1m{}\x1b[0m",
"exception_value": "\x1b[1m{}\x1b[0m",
"arrows": "\x1b[36m{}\x1b[0m",
"value": "\x1b[36m\x1b[1m{}\x1b[0m",
}.items()
)
def __init__(
self,
colorize=False,
backtrace=False,
diagnose=True,
theme=None,
style=None,
max_length=128,
encoding="ascii",
hidden_frames_filename=None,
prefix="",
):
self._colorize = colorize
self._diagnose = diagnose
self._theme = theme or dict(self._default_theme)
self._backtrace = backtrace
self._syntax_highlighter = SyntaxHighlighter(style)
self._max_length = max_length
self._encoding = encoding
self._hidden_frames_filename = hidden_frames_filename
self._prefix = prefix
self._lib_dirs = self._get_lib_dirs()
self._pipe_char = self._get_char("\u2502", "|")
self._cap_char = self._get_char("\u2514", "->")
self._catch_point_identifier = " <Loguru catch point here>"
@staticmethod
def _get_lib_dirs():
schemes = sysconfig.get_scheme_names()
names = ["stdlib", "platstdlib", "platlib", "purelib"]
paths = {sysconfig.get_path(name, scheme) for scheme in schemes for name in names}
return [os.path.abspath(path).lower() + os.sep for path in paths if path in sys.path]
@staticmethod
def _indent(text, count, *, prefix="| "):
if count == 0:
yield text
return
for line in text.splitlines(True):
indented = " " * count + prefix + line
yield indented.rstrip() + "\n"
def _get_char(self, char, default):
try:
char.encode(self._encoding)
except (UnicodeEncodeError, LookupError):
return default
else:
return char
def _is_file_mine(self, file):
filepath = os.path.abspath(file).lower()
if not filepath.endswith(".py"):
return False
return not any(filepath.startswith(d) for d in self._lib_dirs)
def _should_include_frame(self, frame):
return frame.f_code.co_filename != self._hidden_frames_filename
def _extract_frames(self, tb, is_first, *, limit=None, from_decorator=False):
frames, final_source = [], None
if tb is None or (limit is not None and limit <= 0):
return frames, final_source
def get_info(frame, lineno):
filename = frame.f_code.co_filename
function = frame.f_code.co_name
source = linecache.getline(filename, lineno).strip()
return filename, lineno, function, source
infos = []
if self._should_include_frame(tb.tb_frame):
infos.append((get_info(tb.tb_frame, tb.tb_lineno), tb.tb_frame))
get_parent_only = from_decorator and not self._backtrace
if (self._backtrace and is_first) or get_parent_only:
frame = tb.tb_frame.f_back
while frame:
if self._should_include_frame(frame):
infos.insert(0, (get_info(frame, frame.f_lineno), frame))
if get_parent_only:
break
frame = frame.f_back
if infos and not get_parent_only:
(filename, lineno, function, source), frame = infos[-1]
function += self._catch_point_identifier
infos[-1] = ((filename, lineno, function, source), frame)
tb = tb.tb_next
while tb:
if self._should_include_frame(tb.tb_frame):
infos.append((get_info(tb.tb_frame, tb.tb_lineno), tb.tb_frame))
tb = tb.tb_next
if limit is not None:
infos = infos[-limit:]
for (filename, lineno, function, source), frame in infos:
final_source = source
if source:
colorize = self._colorize and self._is_file_mine(filename)
lines = []
if colorize:
lines.append(self._syntax_highlighter.highlight(source))
else:
lines.append(source)
if self._diagnose:
relevant_values = self._get_relevant_values(source, frame)
values = self._format_relevant_values(list(relevant_values), colorize)
lines += list(values)
source = "\n ".join(lines)
frames.append((filename, lineno, function, source))
return frames, final_source
def _get_relevant_values(self, source, frame):
value = None
pending = None
is_attribute = False
is_valid_value = False
is_assignment = True
for token in self._syntax_highlighter.tokenize(source):
type_, string, (_, col), *_ = token
if pending is not None:
# Keyword arguments are ignored
if type_ != tokenize.OP or string != "=" or is_assignment:
yield pending
pending = None
if type_ == tokenize.NAME and not keyword.iskeyword(string):
if not is_attribute:
for variables in (frame.f_locals, frame.f_globals):
try:
value = variables[string]
except KeyError:
continue
else:
is_valid_value = True
pending = (col, self._format_value(value))
break
elif is_valid_value:
try:
value = inspect.getattr_static(value, string)
except AttributeError:
is_valid_value = False
else:
yield (col, self._format_value(value))
elif type_ == tokenize.OP and string == ".":
is_attribute = True
is_assignment = False
elif type_ == tokenize.OP and string == ";":
is_assignment = True
is_attribute = False
is_valid_value = False
else:
is_attribute = False
is_valid_value = False
is_assignment = False
if pending is not None:
yield pending
def _format_relevant_values(self, relevant_values, colorize):
for i in reversed(range(len(relevant_values))):
col, value = relevant_values[i]
pipe_cols = [pcol for pcol, _ in relevant_values[:i]]
pre_line = ""
index = 0
for pc in pipe_cols:
pre_line += (" " * (pc - index)) + self._pipe_char
index = pc + 1
pre_line += " " * (col - index)
value_lines = value.split("\n")
for n, value_line in enumerate(value_lines):
if n == 0:
arrows = pre_line + self._cap_char + " "
else:
arrows = pre_line + " " * (len(self._cap_char) + 1)
if colorize:
arrows = self._theme["arrows"].format(arrows)
value_line = self._theme["value"].format(value_line)
yield arrows + value_line
def _format_value(self, v):
try:
v = repr(v)
except Exception:
v = "<unprintable %s object>" % type(v).__name__
max_length = self._max_length
if max_length is not None and len(v) > max_length:
v = v[: max_length - 3] + "..."
return v
def _format_locations(self, frames_lines, *, has_introduction):
prepend_with_new_line = has_introduction
regex = r'^ File "(?P<file>.*?)", line (?P<line>[^,]+)(?:, in (?P<function>.*))?\n'
for frame in frames_lines:
match = re.match(regex, frame)
if match:
file, line, function = match.group("file", "line", "function")
is_mine = self._is_file_mine(file)
if function is not None:
pattern = ' File "{}", line {}, in {}\n'
else:
pattern = ' File "{}", line {}\n'
if self._backtrace and function and function.endswith(self._catch_point_identifier):
function = function[: -len(self._catch_point_identifier)]
pattern = ">" + pattern[1:]
if self._colorize and is_mine:
dirname, basename = os.path.split(file)
if dirname:
dirname += os.sep
dirname = self._theme["dirname"].format(dirname)
basename = self._theme["basename"].format(basename)
file = dirname + basename
line = self._theme["line"].format(line)
function = self._theme["function"].format(function)
if self._diagnose and (is_mine or prepend_with_new_line):
pattern = "\n" + pattern
location = pattern.format(file, line, function)
frame = location + frame[match.end() :]
prepend_with_new_line = is_mine
yield frame
def _format_exception(
self, value, tb, *, seen=None, is_first=False, from_decorator=False, group_nesting=0
):
# Implemented from built-in traceback module:
# https://github.com/python/cpython/blob/a5b76167/Lib/traceback.py#L468
exc_type, exc_value, exc_traceback = type(value), value, tb
if seen is None:
seen = set()
seen.add(id(exc_value))
if exc_value:
if exc_value.__cause__ is not None and id(exc_value.__cause__) not in seen:
yield from self._format_exception(
exc_value.__cause__,
exc_value.__cause__.__traceback__,
seen=seen,
group_nesting=group_nesting,
)
cause = "The above exception was the direct cause of the following exception:"
if self._colorize:
cause = self._theme["cause"].format(cause)
if self._diagnose:
yield from self._indent("\n\n" + cause + "\n\n\n", group_nesting)
else:
yield from self._indent("\n" + cause + "\n\n", group_nesting)
elif (
exc_value.__context__ is not None
and id(exc_value.__context__) not in seen
and not exc_value.__suppress_context__
):
yield from self._format_exception(
exc_value.__context__,
exc_value.__context__.__traceback__,
seen=seen,
group_nesting=group_nesting,
)
context = "During handling of the above exception, another exception occurred:"
if self._colorize:
context = self._theme["context"].format(context)
if self._diagnose:
yield from self._indent("\n\n" + context + "\n\n\n", group_nesting)
else:
yield from self._indent("\n" + context + "\n\n", group_nesting)
is_grouped = is_exception_group(value)
if is_grouped and group_nesting == 0:
yield from self._format_exception(
value,
tb,
seen=seen,
group_nesting=1,
is_first=is_first,
from_decorator=from_decorator,
)
return
try:
traceback_limit = sys.tracebacklimit
except AttributeError:
traceback_limit = None
frames, final_source = self._extract_frames(
exc_traceback, is_first, limit=traceback_limit, from_decorator=from_decorator
)
exception_only = traceback.format_exception_only(exc_type, exc_value)
# Determining the correct index for the "Exception: message" part in the formatted exception
# is challenging. This is because it might be preceded by multiple lines specific to
# "SyntaxError" or followed by various notes. However, we can make an educated guess based
# on the indentation; the preliminary context for "SyntaxError" is always indented, while
# the Exception itself is not. This allows us to identify the correct index for the
# exception message.
no_indented_indexes = (i for i, p in enumerate(exception_only) if not p.startswith(" "))
error_message_index = next(no_indented_indexes, None)
if error_message_index is not None:
# Remove final new line temporarily.
error_message = exception_only[error_message_index][:-1]
if self._colorize:
if ":" in error_message:
exception_type, exception_value = error_message.split(":", 1)
exception_type = self._theme["exception_type"].format(exception_type)
exception_value = self._theme["exception_value"].format(exception_value)
error_message = exception_type + ":" + exception_value
else:
error_message = self._theme["exception_type"].format(error_message)
if self._diagnose and frames:
if issubclass(exc_type, AssertionError) and not str(exc_value) and final_source:
if self._colorize:
final_source = self._syntax_highlighter.highlight(final_source)
error_message += ": " + final_source
error_message = "\n" + error_message
exception_only[error_message_index] = error_message + "\n"
if is_first:
yield self._prefix
has_introduction = bool(frames)
if has_introduction:
if is_grouped:
introduction = "Exception Group Traceback (most recent call last):"
else:
introduction = "Traceback (most recent call last):"
if self._colorize:
introduction = self._theme["introduction"].format(introduction)
if group_nesting == 1: # Implies we're processing the root ExceptionGroup.
yield from self._indent(introduction + "\n", group_nesting, prefix="+ ")
else:
yield from self._indent(introduction + "\n", group_nesting)
frames_lines = self._format_list(frames) + exception_only
if self._colorize or self._backtrace or self._diagnose:
frames_lines = self._format_locations(frames_lines, has_introduction=has_introduction)
yield from self._indent("".join(frames_lines), group_nesting)
if is_grouped:
exc = None
for n, exc in enumerate(value.exceptions, start=1):
ruler = "+" + (" %s " % ("..." if n > 15 else n)).center(35, "-")
yield from self._indent(ruler, group_nesting, prefix="+-" if n == 1 else " ")
if n > 15:
message = "and %d more exceptions\n" % (len(value.exceptions) - 15)
yield from self._indent(message, group_nesting + 1)
break
elif group_nesting == 10 and is_exception_group(exc):
message = "... (max_group_depth is 10)\n"
yield from self._indent(message, group_nesting + 1)
else:
yield from self._format_exception(
exc,
exc.__traceback__,
seen=seen,
group_nesting=group_nesting + 1,
)
if not is_exception_group(exc) or group_nesting == 10:
yield from self._indent("-" * 35, group_nesting + 1, prefix="+-")
def _format_list(self, frames):
def source_message(filename, lineno, name, line):
message = ' File "%s", line %d, in %s\n' % (filename, lineno, name)
if line:
message += " %s\n" % line.strip()
return message
def skip_message(count):
plural = "s" if count > 1 else ""
return " [Previous line repeated %d more time%s]\n" % (count, plural)
result = []
count = 0
last_source = None
for *source, line in frames:
if source != last_source and count > 3:
result.append(skip_message(count - 3))
if source == last_source:
count += 1
if count > 3:
continue
else:
count = 1
result.append(source_message(*source, line))
last_source = source
# Add a final skip message if the iteration of frames ended mid-repetition.
if count > 3:
result.append(skip_message(count - 3))
return result
def format_exception(self, type_, value, tb, *, from_decorator=False):
yield from self._format_exception(value, tb, is_first=True, from_decorator=from_decorator)
| ExceptionFormatter |
python | mlflow__mlflow | mlflow/spark/__init__.py | {
"start": 17042,
"end": 44357
} | class ____:
"""
Interface to org.apache.hadoop.fs.FileSystem.
Spark ML models expect to read from and write to Hadoop FileSystem when running on a cluster.
Since MLflow works on local directories, we need this interface to copy the files between
the current DFS and local dir.
"""
def __init__(self):
raise Exception("This class should not be instantiated")
_filesystem = None
_conf = None
@classmethod
def _jvm(cls):
from pyspark import SparkContext
return SparkContext._gateway.jvm
@classmethod
def _fs(cls):
if not cls._filesystem:
cls._filesystem = cls._jvm().org.apache.hadoop.fs.FileSystem.get(cls._conf())
return cls._filesystem
@classmethod
def _conf(cls):
from pyspark import SparkContext
sc = SparkContext.getOrCreate()
return sc._jsc.hadoopConfiguration()
@classmethod
def _local_path(cls, path):
return cls._jvm().org.apache.hadoop.fs.Path(os.path.abspath(path))
@classmethod
def _remote_path(cls, path):
return cls._jvm().org.apache.hadoop.fs.Path(path)
@classmethod
def _stats(cls):
return cls._jvm().org.apache.hadoop.fs.FileSystem.getGlobalStorageStatistics()
@classmethod
def copy_to_local_file(cls, src, dst, remove_src):
cls._fs().copyToLocalFile(remove_src, cls._remote_path(src), cls._local_path(dst))
@classmethod
def copy_from_local_file(cls, src, dst, remove_src):
cls._fs().copyFromLocalFile(remove_src, cls._local_path(src), cls._remote_path(dst))
@classmethod
def qualified_local_path(cls, path):
return cls._fs().makeQualified(cls._local_path(path)).toString()
@classmethod
def maybe_copy_from_local_file(cls, src, dst):
"""
Conditionally copy the file to the Hadoop DFS.
The file is copied iff the configuration has distributed filesystem.
Returns:
If copied, return new target location, otherwise return (absolute) source path.
"""
local_path = cls._local_path(src)
qualified_local_path = cls._fs().makeQualified(local_path).toString()
if qualified_local_path == "file:" + local_path.toString():
return local_path.toString()
cls.copy_from_local_file(src, dst, remove_src=False)
_logger.info("Copied SparkML model to %s", dst)
return dst
@classmethod
def _try_file_exists(cls, dfs_path):
try:
return cls._fs().exists(dfs_path)
except Exception as ex:
# Log a debug-level message, since existence checks may raise exceptions
# in normal operating circumstances that do not warrant warnings
_logger.debug(
"Unexpected exception while checking if model uri is visible on DFS: %s", ex
)
return False
@classmethod
def maybe_copy_from_uri(cls, src_uri, dst_path, local_model_path=None):
"""
Conditionally copy the file to the Hadoop DFS from the source uri.
In case the file is already on the Hadoop DFS do nothing.
Returns:
If copied, return new target location, otherwise return source uri.
"""
try:
# makeQualified throws if wrong schema / uri
dfs_path = cls._fs().makeQualified(cls._remote_path(src_uri))
if cls._try_file_exists(dfs_path):
_logger.info("File '%s' is already on DFS, copy is not necessary.", src_uri)
return src_uri
except Exception:
_logger.info("URI '%s' does not point to the current DFS.", src_uri)
_logger.info("File '%s' not found on DFS. Will attempt to upload the file.", src_uri)
return cls.maybe_copy_from_local_file(
local_model_path or _download_artifact_from_uri(src_uri), dst_path
)
@classmethod
def delete(cls, path):
cls._fs().delete(cls._remote_path(path), True)
@classmethod
def is_filesystem_available(cls, scheme):
return scheme in [stats.getScheme() for stats in cls._stats().iterator()]
def _should_use_mlflowdbfs(root_uri):
# The `mlflowdbfs` scheme does not appear in the available schemes returned from
# the Hadoop FileSystem API until a read call has been issued.
from mlflow.utils._spark_utils import _get_active_spark_session
if (
databricks_utils.is_in_databricks_serverless_runtime()
or databricks_utils.is_in_databricks_shared_cluster_runtime()
or not is_valid_dbfs_uri(root_uri)
or not is_databricks_acled_artifacts_uri(root_uri)
or not databricks_utils.is_in_databricks_runtime()
or (environment_variables._DISABLE_MLFLOWDBFS.get() or "").lower() == "true"
):
return False
try:
databricks_utils._get_dbutils()
except Exception:
# If dbutils is unavailable, indicate that mlflowdbfs is unavailable
# because usage of mlflowdbfs depends on dbutils
return False
mlflowdbfs_read_exception_str = None
try:
_get_active_spark_session().read.load("mlflowdbfs:///artifact?run_id=foo&path=/bar")
except Exception as e:
# The load invocation is expected to throw an exception.
mlflowdbfs_read_exception_str = str(e)
try:
return _HadoopFileSystem.is_filesystem_available(_MLFLOWDBFS_SCHEME)
except Exception:
# The HDFS filesystem logic used to determine mlflowdbfs availability on Databricks
# clusters may not work on certain Databricks cluster types due to unavailability of
# the _HadoopFileSystem.is_filesystem_available() API. As a temporary workaround,
# we check the contents of the expected exception raised by a dummy mlflowdbfs
# read for evidence that mlflowdbfs is available. If "MlflowdbfsClient" is present
# in the exception contents, we can safely assume that mlflowdbfs is available because
# `MlflowdbfsClient` is exclusively used by mlflowdbfs for performing MLflow
# file storage operations
#
# TODO: Remove this logic once the _HadoopFileSystem.is_filesystem_available() check
# below is determined to work on all Databricks cluster types
return "MlflowdbfsClient" in (mlflowdbfs_read_exception_str or "")
def _save_model_metadata(
dst_dir,
spark_model,
mlflow_model,
conda_env,
code_paths,
signature=None,
input_example=None,
pip_requirements=None,
extra_pip_requirements=None,
remote_model_path=None,
):
"""
Saves model metadata into the passed-in directory.
If mlflowdbfs is not used, the persisted metadata assumes that a model can be
loaded from a relative path to the metadata file (currently hard-coded to "sparkml").
If mlflowdbfs is used, remote_model_path should be provided, and the model needs to
be loaded from the remote_model_path.
"""
import pyspark
is_spark_connect_model = _is_spark_connect_model(spark_model)
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, dst_dir)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, dst_dir)
mlflow_model.add_flavor(
FLAVOR_NAME,
pyspark_version=pyspark.__version__,
model_data=_SPARK_MODEL_PATH_SUB,
code=code_dir_subpath,
model_class=_get_fully_qualified_class_name(spark_model),
)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.spark",
data=_SPARK_MODEL_PATH_SUB,
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
)
if size := get_total_file_size(dst_dir):
mlflow_model.model_size_bytes = size
mlflow_model.save(os.path.join(dst_dir, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements(is_spark_connect_model)
if remote_model_path:
_logger.info(
"Inferring pip requirements by reloading the logged model from the databricks "
"artifact repository, which can be time-consuming. To speed up, explicitly "
"specify the conda_env or pip_requirements when calling log_model()."
)
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
remote_model_path or dst_dir,
FLAVOR_NAME,
fallback=default_reqs,
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(dst_dir, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(dst_dir, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(dst_dir, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(dst_dir, _PYTHON_ENV_FILE_NAME))
def _validate_model(spark_model):
from pyspark.ml import Model as PySparkModel
from pyspark.ml import Transformer as PySparkTransformer
from pyspark.ml.util import MLReadable, MLWritable
if _is_spark_connect_model(spark_model):
return
if (
(
not isinstance(spark_model, PySparkModel)
and not isinstance(spark_model, PySparkTransformer)
)
or not isinstance(spark_model, MLReadable)
or not isinstance(spark_model, MLWritable)
):
raise MlflowException(
"Cannot serialize this model. MLflow can only save descendants of pyspark.ml.Model "
"or pyspark.ml.Transformer that implement MLWritable and MLReadable.",
INVALID_PARAMETER_VALUE,
)
def _is_spark_connect_model(spark_model):
"""
Return whether the spark model is spark connect ML model
"""
try:
from pyspark.ml.connect import Model as ConnectModel
return isinstance(spark_model, ConnectModel)
except ImportError:
# pyspark < 3.5 does not support Spark connect ML model
return False
def _is_uc_volume_uri(url):
parsed_url = urlparse(url)
return parsed_url.scheme in ["", "dbfs"] and parsed_url.path.startswith("/Volumes")
def _check_databricks_uc_volume_tmpdir_availability(dfs_tmpdir):
if (
databricks_utils.is_in_databricks_serverless_runtime()
or databricks_utils.is_in_databricks_shared_cluster_runtime()
):
if not dfs_tmpdir or not _is_uc_volume_uri(dfs_tmpdir):
raise MlflowException(
"UC volume path must be provided to save, log or load SparkML models "
"in Databricks shared or serverless clusters. "
"Specify environment variable 'MLFLOW_DFS_TMP' "
"or 'dfs_tmpdir' argument that uses a UC volume path starting with '/Volumes/...' "
"when saving, logging or loading a model."
)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="pyspark"))
def save_model(
spark_model,
path,
mlflow_model=None,
conda_env=None,
code_paths=None,
dfs_tmpdir=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
):
"""
Save a Spark MLlib Model to a local path.
By default, this function saves models using the Spark MLlib persistence mechanism.
Args:
spark_model: Spark model to be saved - MLflow can only save descendants of
pyspark.ml.Model or pyspark.ml.Transformer which implement
MLReadable and MLWritable.
path: Local path where the model is to be saved.
mlflow_model: MLflow model config this flavor is being added to.
conda_env: {{ conda_env }}
code_paths: {{ code_paths }}
dfs_tmpdir: Temporary directory path on Distributed (Hadoop) File System (DFS) or local
filesystem if running in local mode. The model is be written in this
destination and then copied to the requested local path. This is necessary
as Spark ML models read from and write to DFS if running on a cluster. All
temporary files created on the DFS are removed if this operation
completes successfully. Defaults to ``/tmp/mlflow``.
signature: See the document of argument ``signature`` in :py:func:`mlflow.spark.log_model`.
input_example: {{ input_example }}
pip_requirements: {{ pip_requirements }}
extra_pip_requirements: {{ extra_pip_requirements }}
metadata: {{ metadata }}
.. code-block:: python
:caption: Example
from mlflow import spark
from pyspark.ml.pipeline import PipelineModel
# your pyspark.ml.pipeline.PipelineModel type
model = ...
mlflow.spark.save_model(model, "spark-model")
"""
_validate_model(spark_model)
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
from pyspark.ml import PipelineModel
from mlflow.utils._spark_utils import _get_active_spark_session
is_spark_connect_model = _is_spark_connect_model(spark_model)
if not is_spark_connect_model and not isinstance(spark_model, PipelineModel):
spark_model = PipelineModel([spark_model])
if mlflow_model is None:
mlflow_model = Model()
if metadata is not None:
mlflow_model.metadata = metadata
# for automatic signature inference, we use an inline implementation rather than the
# `_infer_signature_from_input_example` API because we need to convert model predictions from a
# list into a Pandas series for signature inference.
if signature is None and input_example is not None:
input_ex = _Example(input_example).inference_data
try:
spark = _get_active_spark_session()
if spark is not None:
input_example_spark_df = spark.createDataFrame(input_ex)
# `_infer_spark_model_signature` mutates the model. Copy the model to preserve the
# original model.
try:
spark_model = spark_model.copy()
except Exception:
_logger.debug(
"Failed to copy the model, using the original model.", exc_info=True
)
signature = mlflow.pyspark.ml._infer_spark_model_signature(
spark_model, input_example_spark_df
)
except Exception as e:
if environment_variables._MLFLOW_TESTING.get():
raise
_logger.warning(_LOG_MODEL_INFER_SIGNATURE_WARNING_TEMPLATE, repr(e))
_logger.debug("", exc_info=True)
elif signature is False:
signature = None
sparkml_data_path = os.path.abspath(os.path.join(path, _SPARK_MODEL_PATH_SUB))
if is_spark_connect_model:
spark_model.saveToLocal(sparkml_data_path)
else:
# Spark ML stores the model on DFS if running on a cluster
# Save it to a DFS temp dir first and copy it to local path
if dfs_tmpdir is None:
dfs_tmpdir = MLFLOW_DFS_TMP.get()
_check_databricks_uc_volume_tmpdir_availability(dfs_tmpdir)
tmp_path = generate_tmp_dfs_path(dfs_tmpdir)
spark_model.save(tmp_path)
if databricks_utils.is_in_databricks_runtime() and _is_uc_volume_uri(tmp_path):
# The temp DFS path is a UC volume path.
# Use UC volume fuse mount to read data.
tmp_path_fuse = urlparse(tmp_path).path
shutil.move(src=tmp_path_fuse, dst=sparkml_data_path)
else:
# We're copying the Spark model from DBFS to the local filesystem if (a) the temporary
# DFS URI we saved the Spark model to is a DBFS URI ("dbfs:/my-directory"), or (b) if
# we're running on a Databricks cluster and the URI is schemeless (e.g. looks like a
# filesystem absolute path like "/my-directory")
copying_from_dbfs = is_valid_dbfs_uri(tmp_path) or (
databricks_utils.is_in_cluster() and posixpath.abspath(tmp_path) == tmp_path
)
if copying_from_dbfs and databricks_utils.is_dbfs_fuse_available():
tmp_path_fuse = dbfs_hdfs_uri_to_fuse_path(tmp_path)
shutil.move(src=tmp_path_fuse, dst=sparkml_data_path)
else:
_HadoopFileSystem.copy_to_local_file(tmp_path, sparkml_data_path, remove_src=True)
_save_model_metadata(
dst_dir=path,
spark_model=spark_model,
mlflow_model=mlflow_model,
conda_env=conda_env,
code_paths=code_paths,
signature=signature,
input_example=input_example,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
def _load_model_databricks_dbfs(dfs_tmpdir, local_model_path):
from pyspark.ml.pipeline import PipelineModel
# Spark ML expects the model to be stored on DFS
# Copy the model to a temp DFS location first. We cannot delete this file, as
# Spark may read from it at any point.
fuse_dfs_tmpdir = dbfs_hdfs_uri_to_fuse_path(dfs_tmpdir)
os.makedirs(fuse_dfs_tmpdir)
# Workaround for inability to use shutil.copytree with DBFS FUSE due to permission-denied
# errors on passthrough-enabled clusters when attempting to copy permission bits for directories
shutil_copytree_without_file_permissions(src_dir=local_model_path, dst_dir=fuse_dfs_tmpdir)
return PipelineModel.load(dfs_tmpdir)
def _load_model_databricks_uc_volume(dfs_tmpdir, local_model_path):
from pyspark.ml.pipeline import PipelineModel
# Copy the model to a temp DFS location first. We cannot delete this file, as
# Spark may read from it at any point.
fuse_dfs_tmpdir = urlparse(dfs_tmpdir).path
shutil.copytree(src=local_model_path, dst=fuse_dfs_tmpdir)
return PipelineModel.load(dfs_tmpdir)
def _load_model(model_uri, dfs_tmpdir_base=None, local_model_path=None):
from pyspark.ml.pipeline import PipelineModel
dfs_tmpdir = generate_tmp_dfs_path(dfs_tmpdir_base or MLFLOW_DFS_TMP.get())
_check_databricks_uc_volume_tmpdir_availability(dfs_tmpdir)
if (
databricks_utils.is_in_databricks_serverless_runtime()
or databricks_utils.is_in_databricks_shared_cluster_runtime()
):
return _load_model_databricks_uc_volume(
dfs_tmpdir, local_model_path or _download_artifact_from_uri(model_uri)
)
if databricks_utils.is_in_cluster() and databricks_utils.is_dbfs_fuse_available():
return _load_model_databricks_dbfs(
dfs_tmpdir, local_model_path or _download_artifact_from_uri(model_uri)
)
model_uri = _HadoopFileSystem.maybe_copy_from_uri(model_uri, dfs_tmpdir, local_model_path)
return PipelineModel.load(model_uri)
def _load_spark_connect_model(model_class, local_path):
return _get_class_from_string(model_class).loadFromLocal(local_path)
def load_model(model_uri, dfs_tmpdir=None, dst_path=None):
"""
Load the Spark MLlib model from the path.
Args:
model_uri: The location, in URI format, of the MLflow model, for example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
dfs_tmpdir: Temporary directory path on Distributed (Hadoop) File System (DFS) or local
filesystem if running in local mode. The model is loaded from this
destination. Defaults to ``/tmp/mlflow``.
dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
Returns:
pyspark.ml.pipeline.PipelineModel
.. code-block:: python
:caption: Example
import mlflow
model = mlflow.spark.load_model("spark-model")
# Prepare test documents, which are unlabeled (id, text) tuples.
test = spark.createDataFrame(
[(4, "spark i j k"), (5, "l m n"), (6, "spark hadoop spark"), (7, "apache hadoop")],
["id", "text"],
)
# Make predictions on test documents
prediction = model.transform(test)
"""
# This MUST be called prior to appending the model flavor to `model_uri` in order
# for `artifact_path` to take on the correct value for model loading via mlflowdbfs.
root_uri, artifact_path = _get_root_uri_and_artifact_path(model_uri)
local_mlflow_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=dst_path
)
flavor_conf = Model.load(local_mlflow_model_path).flavors[FLAVOR_NAME]
_add_code_from_conf_to_system_path(local_mlflow_model_path, flavor_conf)
model_class = flavor_conf.get("model_class")
if model_class is not None and model_class.startswith("pyspark.ml.connect."):
spark_model_local_path = os.path.join(local_mlflow_model_path, flavor_conf["model_data"])
return _load_spark_connect_model(model_class, spark_model_local_path)
if _should_use_mlflowdbfs(model_uri) and (
run_id := DatabricksArtifactRepository._extract_run_id(model_uri)
):
from pyspark.ml.pipeline import PipelineModel
mlflowdbfs_path = _mlflowdbfs_path(run_id, artifact_path)
with databricks_utils.MlflowCredentialContext(
get_databricks_profile_uri_from_artifact_uri(root_uri)
):
return PipelineModel.load(mlflowdbfs_path)
sparkml_model_uri = append_to_uri_path(model_uri, flavor_conf["model_data"])
local_sparkml_model_path = os.path.join(local_mlflow_model_path, flavor_conf["model_data"])
return _load_model(
model_uri=sparkml_model_uri,
dfs_tmpdir_base=dfs_tmpdir,
local_model_path=local_sparkml_model_path,
)
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_model``.
Args:
path: Local filesystem path to the MLflow Model with the ``spark`` flavor.
"""
from mlflow.utils._spark_utils import (
_create_local_spark_session_for_loading_spark_model,
_get_active_spark_session,
)
model_meta_path = os.path.join(os.path.dirname(path), MLMODEL_FILE_NAME)
model_meta = Model.load(model_meta_path)
model_class = model_meta.flavors[FLAVOR_NAME].get("model_class")
if model_class is not None and model_class.startswith("pyspark.ml.connect."):
# Note:
# Spark connect ML models don't require a spark session for running inference.
spark = None
spark_model = _load_spark_connect_model(model_class, path)
else:
# NOTE: The `_create_local_spark_session_for_loading_spark_model()` call below may change
# settings of the active session which we do not intend to do here.
# In particular, setting master to local[1] can break distributed clusters.
# To avoid this problem, we explicitly check for an active session. This is not ideal but
# there is no good workaround at the moment.
spark = _get_active_spark_session()
if spark is None:
# NB: If there is no existing Spark context, create a new local one.
# NB: We're disabling caching on the new context since we do not need it and we want to
# avoid overwriting cache of underlying Spark cluster when executed on a Spark Worker
# (e.g. as part of spark_udf).
spark = _create_local_spark_session_for_loading_spark_model()
spark_model = _load_model(model_uri=path)
return _PyFuncModelWrapper(spark, spark_model, signature=model_meta.signature)
def _find_and_set_features_col_as_vector_if_needed(spark_df, spark_model):
"""
Finds the `featuresCol` column in spark_model and
then tries to cast that column to `vector` type.
This method is noop if the `featuresCol` is already of type `vector`
or if it can't be cast to `vector` type
Note:
If a spark ML pipeline contains a single Estimator stage, it requires
the input dataframe to contain features column of vector type.
But the autologging for pyspark ML casts vector column to array<double> type
for parity with the pd Dataframe. The following fix is required, which transforms
that features column back to vector type so that the pipeline stages can correctly work.
A valid scenario is if the auto-logged input example is directly used
for prediction, which would otherwise fail without this transformation.
Args:
spark_df: Input dataframe that contains `featuresCol`
spark_model: A pipeline model or a single transformer that contains `featuresCol` param
Returns:
A spark dataframe that contains features column of `vector` type.
"""
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import types as t
from pyspark.sql.functions import udf
def _find_stage_with_features_col(stage):
if stage.hasParam("featuresCol"):
def _array_to_vector(input_array):
return Vectors.dense(input_array)
array_to_vector_udf = udf(f=_array_to_vector, returnType=VectorUDT())
features_col_name = stage.extractParamMap().get(stage.featuresCol)
features_col_type = [
_field
for _field in spark_df.schema.fields
if _field.name == features_col_name
and _field.dataType
in [t.ArrayType(t.DoubleType(), True), t.ArrayType(t.DoubleType(), False)]
]
if len(features_col_type) == 1:
return spark_df.withColumn(
features_col_name, array_to_vector_udf(features_col_name)
)
return spark_df
if hasattr(spark_model, "stages"):
for stage in reversed(spark_model.stages):
return _find_stage_with_features_col(stage)
return _find_stage_with_features_col(spark_model)
| _HadoopFileSystem |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/dictionary.py | {
"start": 3759,
"end": 6663
} | class ____(Dict[Any, Any]):
def __setitem__(self, key: Any, value: Any) -> None:
_test_sink(key)
def tainted_setitem(d: SpecialSetitemDict) -> SpecialSetitemDict:
d[_test_source()] = 1
return d
def forward_comprehension_value_source():
d = {"a": _test_source() for x in []}
return d
def forward_comprehension_key_source():
d = {_test_source(): 0 for x in []}
return d
def forward_comprehension_value_sink(arg):
d = {"a": _test_sink(x) for x in [arg]}
def forward_comprehension_key_sink(arg):
d = {_test_sink(x): 0 for x in [arg]}
def lists_of_dictionary_iteration_is_precise():
list_of_dicts = [{"with_feature": _test_source(), "without_feature": 0} for x in []]
for dict in list_of_dicts:
_test_sink(dict["with_feature"])
_test_sink(dict["without_feature"])
def reassignment_removes_backwards_taint(d):
d["a"] = 0
_test_sink(d["a"])
def copy_untainted_values_with_tainted_keys():
d = {_test_source(): 1}
values_not_tainted = {}
for key in d:
values_not_tainted[key] = d[key]
return values_not_tainted
def dict_with_tainted_key_flows_to_sink():
d = {_test_source(): 1}
_test_sink(d)
def dict_with_tainted_key_flows_to_sink_via_setitem():
d = {}
d[_test_source()] = 1
_test_sink(d)
def sink_dictionary_through_keys(d: Dict[str, str]) -> None:
[_test_sink(k) for k in d]
def get_keys(d: Dict[str, str]) -> Iterable[str]:
return [k for k in d]
def return_comprehension_with_tained_keys():
d = {_test_source(): 1}
return [k for k in d]
def return_comprehension_with_untainted_keys():
d = {1: _test_source()}
return [k for k in d]
def backwards_model_for_dictionary_comprehension(d) -> None:
inferred = {k: d[k] for k in d}
sink_dictionary_through_keys(inferred)
def test_keys_and_values():
tainted_values = {"benign": ("benign", _test_source())}
# Should be an issue.
_test_sink(tainted_values.values())
# Shouldn't be an issue.
_test_sink(tainted_values.keys())
for item in tainted_values.values():
_test_sink(item[0])
tainted_keys = {_test_source(): ""}
# Should be an issue.
_test_sink(tainted_keys.keys())
# Shouldn't be an issue.
_test_sink(tainted_keys.values())
tainted_tuple_keys = {(_test_source(), 0): ""}
for key in tainted_tuple_keys.keys():
# Should be an issue.
_test_sink(key[0])
# Shouldn't be an issue.
_test_sink(key[1])
def backwards_field_assignment(external):
d = {}
d["index"] = external
return d
def return_tito_literally(external):
return {"index": external}
def test_with_issue_in_dict_comprehension():
sources = [_test_source()]
{"k": s for s in sources if _test_sink(s)}
TV = TypeVar("_T")
def to_map(x: Dict[str, TV]) -> Mapping[str, TV]:
return x
| SpecialSetitemDict |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/errors.py | {
"start": 1481,
"end": 1595
} | class ____:
field_names: Sequence[str]
field_snaps: Sequence[ConfigFieldSnap]
@record
| MissingFieldsErrorData |
python | doocs__leetcode | lcci/01.04.Palindrome Permutation/Solution.py | {
"start": 0,
"end": 145
} | class ____:
def canPermutePalindrome(self, s: str) -> bool:
cnt = Counter(s)
return sum(v & 1 for v in cnt.values()) < 2
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_server_tool_caller_param.py | {
"start": 225,
"end": 365
} | class ____(TypedDict, total=False):
tool_id: Required[str]
type: Required[Literal["code_execution_20250825"]]
| BetaServerToolCallerParam |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/organization_detector_workflow_details.py | {
"start": 1318,
"end": 4069
} | class ____(OrganizationEndpoint):
def convert_args(self, request: Request, detector_workflow_id, *args, **kwargs):
args, kwargs = super().convert_args(request, *args, **kwargs)
try:
kwargs["detector_workflow"] = DetectorWorkflow.objects.get(
workflow__organization=kwargs["organization"], id=detector_workflow_id
)
except DetectorWorkflow.DoesNotExist:
raise ResourceDoesNotExist
return args, kwargs
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
"DELETE": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ISSUES
permission_classes = (OrganizationDetectorPermission,)
@extend_schema(
operation_id="Fetch a Detector-Workflow Connection",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
DetectorWorkflowParams.DETECTOR_WORKFLOW_ID,
],
responses={
201: DetectorWorkflowSerializer,
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(
self, request: Request, organization: Organization, detector_workflow: DetectorWorkflow
):
"""
Returns a DetectorWorkflow
"""
serialized_detector_workflow = serialize(
detector_workflow,
request.user,
DetectorWorkflowSerializer(),
)
return Response(serialized_detector_workflow)
@extend_schema(
operation_id="Remove a Detector-Workflow Connection",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
DetectorWorkflowParams.DETECTOR_WORKFLOW_ID,
],
responses={
204: RESPONSE_NO_CONTENT,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(
self, request: Request, organization: Organization, detector_workflow: DetectorWorkflow
):
"""
Delete a DetectorWorkflow
"""
if not can_edit_detector_workflow_connections(detector_workflow.detector, request):
raise PermissionDenied
detector_workflow_id = detector_workflow.id
audit_log_data = detector_workflow.get_audit_log_data()
detector_workflow.delete()
create_audit_entry(
request=request,
organization=organization,
target_object=detector_workflow_id,
event=audit_log.get_event_id("DETECTOR_WORKFLOW_REMOVE"),
data=audit_log_data,
)
return Response(status=status.HTTP_204_NO_CONTENT)
| OrganizationDetectorWorkflowDetailsEndpoint |
python | huggingface__transformers | src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | {
"start": 28334,
"end": 30586
} | class ____(nn.Module):
def __init__(self, config: Kosmos2_5VisionConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([Kosmos2_5VisionLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def _prepare_attention_mask(self, attention_mask, input_shape, inputs_embeds):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
return expanded_attn_mask
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
attention_mask = self._prepare_attention_mask(attention_mask, hidden_states.shape[:2], hidden_states)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, output_attentions, **kwargs)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextSinusoidalPositionalEmbedding with Kosmos2->Kosmos2_5
| Kosmos2_5VisionEncoder |
python | getsentry__sentry | src/sentry/eventstream/snuba.py | {
"start": 2588,
"end": 16278
} | class ____(EventStream):
# Beware! Changing this protocol (introducing a new version, or the message
# format/fields themselves) requires consideration of all downstream
# consumers. This includes the post-processing forwarder code!
EVENT_PROTOCOL_VERSION = 2
# These keys correspond to tags that are typically prefixed with `sentry:`
# and will wreak havok in the UI if both the `sentry:`-prefixed and
# non-prefixed variations occur in a response.
UNEXPECTED_TAG_KEYS = frozenset(["dist", "release", "user"])
def _get_headers_for_insert(
self,
event: Event | GroupEvent,
is_new: bool,
is_regression: bool,
is_new_group_environment: bool,
primary_hash: str | None,
received_timestamp: float | datetime,
skip_consume: bool,
group_states: GroupStates | None = None,
) -> MutableMapping[str, str]:
return {
"Received-Timestamp": str(received_timestamp),
}
def insert(
self,
event: Event | GroupEvent,
is_new: bool,
is_regression: bool,
is_new_group_environment: bool,
primary_hash: str | None,
received_timestamp: float | datetime,
skip_consume: bool = False,
group_states: GroupStates | None = None,
eventstream_type: str | None = None,
**kwargs: Any,
) -> None:
if isinstance(event, GroupEvent) and not event.occurrence:
logger.error(
"`GroupEvent` passed to `EventStream.insert`. `GroupEvent` may only be passed when "
"associated with an `IssueOccurrence`",
)
return
project = event.project
set_current_event_project(project.id)
retention_days = quotas.backend.get_event_retention(organization=project.organization)
event_data = event.get_raw_data(for_stream=True)
unexpected_tags = {
k
for (k, v) in (get_path(event_data, "tags", filter=True) or [])
if k in self.UNEXPECTED_TAG_KEYS
}
if unexpected_tags:
logger.error("%r received unexpected tags: %r", self, unexpected_tags)
headers = self._get_headers_for_insert(
event,
is_new,
is_regression,
is_new_group_environment,
primary_hash,
received_timestamp,
skip_consume,
group_states,
)
skip_semantic_partitioning = (
kwargs[KW_SKIP_SEMANTIC_PARTITIONING]
if KW_SKIP_SEMANTIC_PARTITIONING in kwargs
else False
)
event_type = self._get_event_type(event)
occurrence_data = self._get_occurrence_data(event)
# instead of normalizing and doing custom 'contexts' processing in snuba, we elect to do it here instead to
# avoid having to clutter up snuba code with business logic
if event_type == EventStreamEventType.Generic:
event_data = dict(event_data)
contexts = event_data.setdefault("contexts", {})
# add user.geo to contexts if it exists
user_dict = event_data.get("user") or {}
geo = user_dict.get("geo", {})
if "geo" not in contexts and isinstance(geo, dict):
contexts["geo"] = geo
# transactions processing has a configurable 'skipped contexts' to skip writing specific contexts maps
# to the row. for now, we're ignoring that until we have a need for it
self._send(
project.id,
"insert",
extra_data=(
{
"group_id": event.group_id,
"group_ids": [group.id for group in getattr(event, "groups", [])],
"group_first_seen": (
json.datetime_to_str(event.group.first_seen)
if event.group is not None
else None
),
"event_id": event.event_id,
"organization_id": project.organization_id,
"project_id": event.project_id,
# TODO(mitsuhiko): We do not want to send this incorrect
# message but this is what snuba needs at the moment.
"message": event.search_message,
"platform": event.platform,
"datetime": json.datetime_to_str(event.datetime),
"data": event_data,
"primary_hash": primary_hash,
"retention_days": retention_days,
"occurrence_id": occurrence_data.get("id"),
"occurrence_data": occurrence_data,
},
{
"is_new": is_new,
"is_regression": is_regression,
"is_new_group_environment": is_new_group_environment,
"skip_consume": skip_consume,
"group_states": group_states,
},
),
headers=headers,
asynchronous=kwargs.get("asynchronous", True),
skip_semantic_partitioning=skip_semantic_partitioning,
event_type=event_type,
)
if in_rollout_group("eventstream.eap_forwarding_rate", event.project_id):
self._forward_event_to_items(event, event_data, event_type, project)
def _missing_required_item_fields(self, event_data: Mapping[str, Any]) -> list[str]:
root_level_fields = ["event_id", "timestamp"]
missing_fields = [field for field in root_level_fields if field not in event_data]
trace_id = get_path(event_data, "contexts", "trace", "trace_id", default=None)
if trace_id is None:
missing_fields.append("trace_id")
return missing_fields
def _forward_event_to_items(
self,
event: Event | GroupEvent,
event_data: Mapping[str, Any],
event_type: EventStreamEventType,
project: Project,
) -> None:
if not (
event_type == EventStreamEventType.Error or event_type == EventStreamEventType.Generic
):
return
missing_fields = self._missing_required_item_fields(event_data)
if missing_fields:
logger.debug(
"Event data is missing required fields to forward to items: %s", missing_fields
)
return
self._send_item(serialize_event_data_as_item(event, event_data, project))
def start_delete_groups(self, project_id: int, group_ids: Sequence[int]) -> Mapping[str, Any]:
if not group_ids:
raise ValueError("expected groups to delete!")
state = {
"transaction_id": str(uuid4().hex),
"project_id": project_id,
"group_ids": list(group_ids),
"datetime": json.datetime_to_str(datetime.now(tz=timezone.utc)),
}
self._send(project_id, "start_delete_groups", extra_data=(state,), asynchronous=False)
return state
def end_delete_groups(self, state: Mapping[str, Any]) -> None:
state_copy: MutableMapping[str, Any] = {**state}
state_copy["datetime"] = json.datetime_to_str(datetime.now(tz=timezone.utc))
self._send(
state_copy["project_id"],
"end_delete_groups",
extra_data=(state_copy,),
asynchronous=False,
)
def start_merge(
self,
project_id: int,
previous_group_ids: Sequence[int],
new_group_id: int,
new_group_first_seen: datetime | None = None,
) -> dict[str, Any]:
if not previous_group_ids:
raise ValueError("expected groups to merge!")
state = {
"transaction_id": uuid4().hex,
"project_id": project_id,
"previous_group_ids": list(previous_group_ids),
"new_group_id": new_group_id,
"datetime": json.datetime_to_str(datetime.now(tz=timezone.utc)),
}
if new_group_first_seen is not None:
state["new_group_first_seen"] = json.datetime_to_str(new_group_first_seen)
self._send(project_id, "start_merge", extra_data=(state,), asynchronous=False)
return state
def end_merge(self, state: Mapping[str, Any]) -> None:
state_copy: MutableMapping[str, Any] = {**state}
state_copy["datetime"] = json.datetime_to_str(datetime.now(tz=timezone.utc))
self._send(
state_copy["project_id"], "end_merge", extra_data=(state_copy,), asynchronous=False
)
def start_unmerge(
self, project_id: int, hashes: Collection[str], previous_group_id: int, new_group_id: int
) -> Mapping[str, Any] | None:
if not hashes:
return None
state = {
"transaction_id": uuid4().hex,
"project_id": project_id,
"previous_group_id": previous_group_id,
"new_group_id": new_group_id,
"hashes": list(hashes),
"datetime": json.datetime_to_str(datetime.now(tz=timezone.utc)),
}
self._send(project_id, "start_unmerge", extra_data=(state,), asynchronous=False)
return state
def end_unmerge(self, state: Mapping[str, Any]) -> None:
state_copy: MutableMapping[str, Any] = {**state}
state_copy["datetime"] = json.datetime_to_str(datetime.now(tz=timezone.utc))
self._send(
state_copy["project_id"], "end_unmerge", extra_data=(state_copy,), asynchronous=False
)
def start_delete_tag(self, project_id: int, tag: str) -> Mapping[str, Any]:
if not tag:
raise ValueError("expected tag")
state = {
"transaction_id": uuid4().hex,
"project_id": project_id,
"tag": tag,
"datetime": json.datetime_to_str(datetime.now(tz=timezone.utc)),
}
self._send(project_id, "start_delete_tag", extra_data=(state,), asynchronous=False)
return state
def end_delete_tag(self, state: Mapping[str, Any]) -> None:
state_copy: MutableMapping[str, Any] = {**state}
state_copy["datetime"] = json.datetime_to_str(datetime.now(tz=timezone.utc))
self._send(
state_copy["project_id"], "end_delete_tag", extra_data=(state_copy,), asynchronous=False
)
def tombstone_events_unsafe(
self,
project_id: int,
event_ids: Sequence[str],
old_primary_hash: str | None = None,
from_timestamp: datetime | None = None,
to_timestamp: datetime | None = None,
) -> None:
"""
Tell Snuba to eventually delete these events.
This marks events as deleted but does not immediately exclude those
events from all queries. Because of that limitation this is not proper,
because not immediate, event deletion.
"Proper" group deletion is essentially running this function for every
event in the group, plus `exclude_groups` to make sure the changes are
immediately user-visible.
Reprocessing (v2) splits a group into events-to-be-reprocessed
(re-insert with new group_id) and events-to-be-deleted
(`tombstone_events`), then excludes the group from all queries
(`exclude_groups`).
:param old_primary_hash: If present, the event is only tombstoned
to be reinserted over with a guaranteed-different primary hash.
This is necessary with Snuba's errors table as the primary_hash is
part of the PK/sortkey.
"""
state = {
"project_id": project_id,
"event_ids": event_ids,
"old_primary_hash": old_primary_hash,
"from_timestamp": from_timestamp,
"to_timestamp": to_timestamp,
}
self._send(project_id, "tombstone_events", extra_data=(state,), asynchronous=False)
def replace_group_unsafe(
self,
project_id: int,
event_ids: Sequence[str],
new_group_id: int,
from_timestamp: datetime | None = None,
to_timestamp: datetime | None = None,
) -> None:
"""
Tell Snuba to move events into a new group ID
Same caveats as tombstone_events
"""
state = {
"project_id": project_id,
"event_ids": event_ids,
"new_group_id": new_group_id,
"from_timestamp": from_timestamp,
"to_timestamp": to_timestamp,
}
self._send(project_id, "replace_group", extra_data=(state,), asynchronous=False)
def exclude_groups(self, project_id: int, group_ids: Sequence[int]) -> None:
"""
Exclude a group from queries for a while until event tombstoning takes
effect. See docstring of `tombstone_events`.
`exclude_groups` basically makes Snuba add `where group_id not in (1,
2, ...)` to every query.
"""
state = {"project_id": project_id, "group_ids": group_ids}
self._send(project_id, "exclude_groups", extra_data=(state,), asynchronous=False)
def _send(
self,
project_id: int,
_type: str,
extra_data: tuple[Any, ...] = (),
asynchronous: bool = True,
headers: MutableMapping[str, str] | None = None,
skip_semantic_partitioning: bool = False,
event_type: EventStreamEventType = EventStreamEventType.Error,
) -> None:
raise NotImplementedError
def _send_item(self, trace_item: TraceItem) -> None:
raise NotImplementedError
| SnubaProtocolEventStream |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 12918,
"end": 13249
} | class ____(TemporalType):
"""
Data type representing a calendar date.
Notes
-----
The underlying representation of this type is a 32-bit signed integer.
The integer indicates the number of days since the Unix epoch (1970-01-01).
The number can be negative to indicate dates before the epoch.
"""
| Date |
python | modin-project__modin | modin/core/dataframe/pandas/dataframe/utils.py | {
"start": 19725,
"end": 39301
} | class ____(ShuffleSortFunctions):
def __init__(
self,
modin_frame: "PandasDataframe",
columns: Union[str, list],
ascending: Union[list, bool],
ideal_num_new_partitions: int,
resample_kwargs: dict,
**kwargs: dict,
):
resample_kwargs = resample_kwargs.copy()
rule = resample_kwargs.pop("rule")
if resample_kwargs["closed"] is None:
# this rule regarding the default value of 'closed' is inherited
# from pandas documentation for 'pandas.DataFrame.resample'
if rule in ("ME", "YE", "QE", "BME", "BA", "BQE", "W"):
resample_kwargs["closed"] = "right"
else:
resample_kwargs["closed"] = "left"
super().__init__(
modin_frame,
columns,
ascending,
ideal_num_new_partitions,
closed_on_right=resample_kwargs["closed"] == "right",
**kwargs,
)
resample_kwargs["freq"] = to_offset(rule)
self.resample_kwargs = resample_kwargs
@staticmethod
def pick_samples_for_quantiles(
df: pandas.DataFrame,
num_partitions: int,
length: int,
) -> pandas.DataFrame:
# to build proper bins we need min and max timestamp of the whole DatetimeIndex,
# so computing it in each partition
return pandas.concat([df.min().to_frame().T, df.max().to_frame().T])
def pick_pivots_from_samples_for_sort(
self,
samples: np.ndarray,
ideal_num_new_partitions: int,
method: str = "linear",
key: Optional[Callable] = None,
) -> np.ndarray:
if key is not None:
raise NotImplementedError(key)
max_value = samples.max()
first, last = _get_timestamp_range_edges(
samples.min(),
max_value,
self.resample_kwargs["freq"],
unit=samples.dt.unit,
closed=self.resample_kwargs["closed"],
origin=self.resample_kwargs["origin"],
offset=self.resample_kwargs["offset"],
)
all_bins = pandas.date_range(
start=first,
end=last,
freq=self.resample_kwargs["freq"],
ambiguous=True,
nonexistent="shift_forward",
unit=samples.dt.unit,
)
all_bins = self._adjust_bin_edges(
all_bins,
max_value,
freq=self.resample_kwargs["freq"],
closed=self.resample_kwargs["closed"],
)
# take pivot values with an even interval
step = 1 / ideal_num_new_partitions
bins = [
all_bins[int(len(all_bins) * i * step)]
for i in range(1, ideal_num_new_partitions)
]
return bins
def _adjust_bin_edges(
self,
binner: pandas.DatetimeIndex,
end_timestamp,
freq,
closed,
) -> pandas.DatetimeIndex:
"""
Adjust bin edges.
This function was copied & simplified from ``pandas.core.resample.TimeGrouper._adjuct_bin_edges()``.
Parameters
----------
binner : pandas.DatetimeIndex
end_timestamp : pandas.Timestamp
freq : str
closed : bool
Returns
-------
pandas.DatetimeIndex
"""
# Some hacks for > daily data, see pandas-dev/pandas#1471, pandas-dev/pandas#1458, pandas-dev/pandas#1483
if freq.name not in ("BME", "ME", "W") and freq.name.split("-")[0] not in (
"BQE",
"BYE",
"QE",
"YE",
"W",
):
return binner
# If the right end-point is on the last day of the month, roll forwards
# until the last moment of that day. Note that we only do this for offsets
# which correspond to the end of a super-daily period - "month start", for
# example, is excluded.
if closed == "right":
# GH 21459, GH 9119: Adjust the bins relative to the wall time
edges_dti = binner.tz_localize(None)
edges_dti = (
edges_dti
+ pandas.Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
- pandas.Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
)
binner = edges_dti.tz_localize(binner.tz)
# intraday values on last day
if binner[-2] > end_timestamp:
binner = binner[:-1]
return binner
@staticmethod
def split_partitions_using_pivots_for_sort(
df: pandas.DataFrame,
columns_info: "list[ColumnInfo]",
ascending: bool,
closed_on_right: bool = True,
**kwargs: dict,
) -> "tuple[pandas.DataFrame, ...]":
def add_attr(df, timestamp):
if "bin_bounds" in df.attrs:
df.attrs["bin_bounds"] = (*df.attrs["bin_bounds"], timestamp)
else:
df.attrs["bin_bounds"] = (timestamp,)
return df
result = ShuffleSortFunctions.split_partitions_using_pivots_for_sort(
df, columns_info, ascending, **kwargs
)
# it's required for each bin to know its bounds in order for resampling to work
# properly when down-sampling occurs. Reach here for an example:
# https://github.com/modin-project/modin/pull/7140#discussion_r1549246505
# We're writing the bounds as 'attrs' to avoid duplications in the final partition
for i, pivot in enumerate(columns_info[0].pivots):
add_attr(result[i], pivot - pandas.Timedelta(1, unit="ns"))
if i + 1 <= len(result):
add_attr(result[i + 1], pivot + pandas.Timedelta(1, unit="ns"))
return result
def lazy_metadata_decorator(apply_axis=None, axis_arg=-1, transpose=False):
"""
Lazily propagate metadata for the ``PandasDataframe``.
This decorator first adds the minimum required reindexing operations
to each partition's queue of functions to be lazily applied for
each PandasDataframe in the arguments by applying the function
run_f_on_minimally_updated_metadata. The decorator also sets the
flags for deferred metadata synchronization on the function result
if necessary.
Parameters
----------
apply_axis : str, default: None
The axes on which to apply the reindexing operations to the `self._partitions` lazily.
Case None: No lazy metadata propagation.
Case "both": Add reindexing operations on both axes to partition queue.
Case "opposite": Add reindexing operations complementary to given axis.
Case "rows": Add reindexing operations on row axis to partition queue.
axis_arg : int, default: -1
The index or column axis.
transpose : bool, default: False
Boolean for if a transpose operation is being used.
Returns
-------
Wrapped Function.
"""
def decorator(f):
from functools import wraps
@wraps(f)
def run_f_on_minimally_updated_metadata(self, *args, **kwargs):
from .dataframe import PandasDataframe
for obj in (
[self]
+ [o for o in args if isinstance(o, PandasDataframe)]
+ [v for v in kwargs.values() if isinstance(v, PandasDataframe)]
+ [
d
for o in args
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
+ [
d
for _, o in kwargs.items()
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
):
if apply_axis == "both":
if obj._deferred_index and obj._deferred_column:
obj._propagate_index_objs(axis=None)
elif obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif apply_axis == "opposite":
if "axis" not in kwargs:
axis = args[axis_arg]
else:
axis = kwargs["axis"]
if axis == 0 and obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif axis == 1 and obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif apply_axis == "rows":
obj._propagate_index_objs(axis=0)
result = f(self, *args, **kwargs)
if apply_axis is None and not transpose:
result._deferred_index = self._deferred_index
result._deferred_column = self._deferred_column
elif apply_axis is None and transpose:
result._deferred_index = self._deferred_column
result._deferred_column = self._deferred_index
elif apply_axis == "opposite":
if axis == 0:
result._deferred_index = self._deferred_index
else:
result._deferred_column = self._deferred_column
elif apply_axis == "rows":
result._deferred_column = self._deferred_column
return result
return run_f_on_minimally_updated_metadata
return decorator
def add_missing_categories_to_groupby(
    dfs,
    by,
    operator,
    initial_columns,
    combined_cols,
    is_udf_agg,
    kwargs,
    initial_dtypes=None,
):
    """
    Generate values for missing categorical values to be inserted into groupby result.

    This function is used to emulate behavior of ``groupby(observed=False)`` parameter,
    it takes groupby result that was computed using ``groupby(observed=True)``
    and computes results for categorical values that are not presented in `dfs`.

    Parameters
    ----------
    dfs : list of pandas.DataFrames
        Row partitions containing groupby results.
    by : list of hashable
        Column labels that were used to perform groupby.
    operator : callable
        Aggregation function that was used during groupby.
    initial_columns : pandas.Index
        Column labels of the original dataframe.
    combined_cols : pandas.Index
        Column labels of the groupby result.
    is_udf_agg : bool
        Whether ``operator`` is a UDF.
    kwargs : dict
        Parameters that were passed to ``groupby(by, **kwargs)``.
    initial_dtypes : pandas.Series, optional
        Dtypes of the original dataframe. If not specified, assume it's ``int64``.

    Returns
    -------
    masks : dict[int, pandas.DataFrame]
        Mapping between partition idx and a dataframe with results for missing categorical values
        to insert to this partition.
    new_combined_cols : pandas.Index
        New column labels of the groupby result. If ``is_udf_agg is True``, then ``operator``
        may change the resulted columns.
    """
    # NOTE(review): this mutates the caller's `kwargs` dict in place — TODO
    # confirm callers pass a throwaway copy.
    kwargs["observed"] = False
    new_combined_cols = combined_cols

    ### At first we need to compute missing categorical values
    indices = [df.index for df in dfs]
    # total_index contains all categorical values that resided in the result,
    # missing values are computed differently depending on whether we're grouping
    # on multiple groupers or not
    total_index = indices[0].append(indices[1:])
    if isinstance(total_index, pandas.MultiIndex):
        # No categorical levels at all -> nothing can be "missing"; bail out early.
        if all(
            not isinstance(level, pandas.CategoricalIndex)
            for level in total_index.levels
        ):
            return {}, new_combined_cols
        missing_cats_dtype = {
            name: (
                level.dtype
                if isinstance(level.dtype, pandas.CategoricalDtype)
                # it's a bit confusing but we have to convert the remaining 'by' columns to categoricals
                # in order to compute a proper fill value later in the code
                else pandas.CategoricalDtype(level)
            )
            for level, name in zip(total_index.levels, total_index.names)
        }
        # if we're grouping on multiple groupers, then the missing categorical values is a
        # carthesian product of (actual_missing_categorical_values X all_values_of_another_groupers)
        complete_index = pandas.MultiIndex.from_product(
            [
                value.categories.astype(total_level.dtype)
                for total_level, value in zip(
                    total_index.levels, missing_cats_dtype.values()
                )
            ],
            names=by,
        )
        missing_index = complete_index[~complete_index.isin(total_index)]
    else:
        if not isinstance(total_index, pandas.CategoricalIndex):
            return {}, new_combined_cols
        # if we're grouping on a single grouper then we simply compute the difference
        # between categorical values in the result and the values defined in categorical dtype
        missing_index = total_index.categories.difference(total_index.values)
        missing_cats_dtype = {by[0]: pandas.CategoricalDtype(missing_index)}
    missing_index.names = by

    if len(missing_index) == 0:
        return {}, new_combined_cols

    ### At this stage we want to get a fill_value for missing categorical values
    if is_udf_agg and isinstance(total_index, pandas.MultiIndex):
        # if grouping on multiple columns and aggregating with an UDF, then the
        # fill value is always `np.nan`
        missing_values = pandas.DataFrame({0: [np.nan]})
    else:
        # In case of a udf aggregation we're forced to run the operator against each
        # missing category, as in theory it can return different results for each
        # empty group. In other cases it's enough to run the operator against a single
        # missing categorical and then broadcast the fill value to each missing value
        if not is_udf_agg:
            missing_cats_dtype = {
                key: pandas.CategoricalDtype(value.categories[:1])
                for key, value in missing_cats_dtype.items()
            }

        # Run the aggregation over an EMPTY frame that still carries the
        # categorical dtypes: groupby(observed=False) then materializes one
        # (empty) group per category, which yields the fill value.
        empty_df = pandas.DataFrame(columns=initial_columns)
        # HACK: default 'object' dtype doesn't fit our needs, as most of the aggregations
        # fail on a non-numeric columns, ideally, we need dtypes of the original dataframe,
        # however, 'int64' also works fine here if the original schema is not available
        empty_df = empty_df.astype(
            "int64" if initial_dtypes is None else initial_dtypes
        )
        empty_df = empty_df.astype(missing_cats_dtype)
        missing_values = operator(empty_df.groupby(by, **kwargs))

    if is_udf_agg and not isinstance(total_index, pandas.MultiIndex):
        missing_values = missing_values.drop(columns=by, errors="ignore")
        # A UDF may produce columns that differ from `combined_cols`; take the
        # outer join of both column sets as the new result schema.
        new_combined_cols = pandas.concat(
            [
                pandas.DataFrame(columns=combined_cols),
                missing_values.iloc[:0],
            ],
            axis=0,
            join="outer",
        ).columns
    else:
        # HACK: If the aggregation has failed, the result would be empty. Assuming the
        # fill value to be `np.nan` here (this may not always be correct!!!)
        fill_value = np.nan if len(missing_values) == 0 else missing_values.iloc[0, 0]
        missing_values = pandas.DataFrame(
            fill_value, index=missing_index, columns=combined_cols
        )

    # restoring original categorical dtypes for the indices (MultiIndex already have proper dtypes)
    if not isinstance(missing_values.index, pandas.MultiIndex):
        missing_values.index = missing_values.index.astype(total_index.dtype)

    ### Then we decide to which missing categorical values should go to which partition
    if not kwargs["sort"]:
        # If the result is allowed to be unsorted, simply insert all the missing
        # categories to the last partition
        mask = {len(indices) - 1: missing_values}
        return mask, new_combined_cols

    # If the result has to be sorted, we have to assign missing categoricals to proper partitions.
    # For that purpose we define bins with corner values of each partition and then using either
    # np.digitize or np.searchsorted find correct bins for each missing categorical value.
    # Example: part0-> [0, 1, 2]; part1-> [3, 4, 10, 12]; part2-> [15, 17, 20, 100]
    #   bins -> [2, 12] # took last values of each partition excluding the last partition
    #                   # (every value that's matching 'x > part[-2][-1]' should go to the
    #                   # last partition, meaning that including the last value of the last
    #                   # partitions doesn't make sense)
    #   missing_cats -> [-2, 5, 6, 14, 21, 120]
    #   np.digitize(missing_cats, bins) -> [ 0, 1, 1, 2, 2, 2]
    #       ^-- mapping between values and partition idx to insert
    bins = []
    old_bins_to_new = {}
    offset = 0
    # building bins by taking last values of each partition excluding the last partition
    for idx in indices[:-1]:
        if len(idx) == 0:
            # if a partition is empty, we can't use its values to define a bin, thus we simply
            # skip it and remember the number of skipped partitions as an 'offset'
            offset += 1
            continue
        # remember the number of skipped partitions before this bin, in order to restore original
        # indexing at the end
        old_bins_to_new[len(bins)] = offset
        # for MultiIndices we always use the very first level for bins as using multiple levels
        # doesn't affect the result
        bins.append(idx[-1][0] if isinstance(idx, pandas.MultiIndex) else idx[-1])
    old_bins_to_new[len(bins)] = offset

    if len(bins) == 0:
        # insert values to the first non-empty partition
        return {old_bins_to_new.get(0, 0): missing_values}, new_combined_cols

    # we used the very first level of MultiIndex to build bins, meaning that we also have
    # to use values of the first index's level for 'digitize'
    lvl_zero = (
        missing_values.index.levels[0]
        if isinstance(missing_values.index, pandas.MultiIndex)
        else missing_values.index
    )
    if pandas.api.types.is_any_real_numeric_dtype(lvl_zero):
        part_idx = np.digitize(lvl_zero, bins, right=True)
    else:
        # non-numeric values (e.g. strings) can't go through np.digitize
        part_idx = np.searchsorted(bins, lvl_zero)

    ### In the end we build a dictionary mapping partition index to a dataframe with missing categoricals
    ### to be inserted into this partition
    masks = {}
    if isinstance(total_index, pandas.MultiIndex):
        for idx, values in pandas.RangeIndex(len(lvl_zero)).groupby(part_idx).items():
            masks[idx] = missing_values[
                pandas.Index(missing_values.index.codes[0]).isin(values)
            ]
    else:
        frame_idx = missing_values.index.to_frame()
        for idx, values in lvl_zero.groupby(part_idx).items():
            masks[idx] = missing_values[frame_idx.iloc[:, 0].isin(values)]

    # Restore the original indexing by adding the amount of skipped missing partitions
    masks = {key + old_bins_to_new[key]: value for key, value in masks.items()}
    return masks, new_combined_cols
| ShuffleResample |
python | matplotlib__matplotlib | tools/boilerplate.py | {
"start": 3349,
"end": 14393
} | class ____:
"""
A placeholder class to destringify annotations from ast
"""
def __init__(self, value):
self._repr = value
def __repr__(self):
return self._repr
def generate_function(name, called_fullname, template, **kwargs):
    """
    Create a wrapper function *pyplot_name* calling *call_name*.

    Parameters
    ----------
    name : str
        The function to be created.
    called_fullname : str
        The method to be wrapped in the format ``"Class.method"``.
    template : str
        The template to be used. The template must contain {}-style format
        placeholders. The following placeholders are filled in:

        - name: The function name.
        - signature: The function signature (including parentheses).
        - called_name: The name of the called function.
        - call: Parameters passed to *called_name* (including parentheses).

    **kwargs
        Additional parameters are passed to ``template.format()``.

    Raises
    ------
    ValueError
        If the wrapped method has a parameter whose name collides with a
        local used in the generated wrapper body.
    """
    # Get signature of wrapped function.
    class_name, called_name = called_fullname.split('.')
    class_ = {'Axes': Axes, 'Figure': Figure}[class_name]
    meth = getattr(class_, called_name)
    decorator = _api.deprecation.DECORATORS.get(meth)
    # Generate the wrapper with the non-kwonly signature, as it will get
    # redecorated with make_keyword_only by _copy_docstring_and_deprecators.
    if decorator and decorator.func is _api.make_keyword_only:
        meth = meth.__wrapped__
    annotated_trees = get_ast_mro_trees(class_)
    signature = get_matching_signature(meth, annotated_trees)

    # Replace self argument.
    params = list(signature.parameters.values())[1:]
    has_return_value = str(signature.return_annotation) != 'None'
    signature = str(signature.replace(parameters=[
        param.replace(default=value_formatter(param.default))
        if param.default is not param.empty else param
        for param in params]))

    # How to call the wrapped function.
    call = '(' + ', '.join((
        # Pass "intended-as-positional" parameters positionally to avoid
        # forcing third-party subclasses to reproduce the parameter names.
        '{0}'
        if param.kind in [
            Parameter.POSITIONAL_OR_KEYWORD]
        and param.default is Parameter.empty else
        # Only pass the data kwarg if it is actually set, to avoid forcing
        # third-party subclasses to support it.
        '**({{"data": data}} if data is not None else {{}})'
        if param.name == "data" else
        '{0}={0}'
        if param.kind in [
            Parameter.POSITIONAL_OR_KEYWORD,
            Parameter.KEYWORD_ONLY] else
        '{0}'
        if param.kind is Parameter.POSITIONAL_ONLY else
        '*{0}'
        if param.kind is Parameter.VAR_POSITIONAL else
        '**{0}'
        if param.kind is Parameter.VAR_KEYWORD else
        None).format(param.name)
        for param in params) + ')'
    return_statement = 'return ' if has_return_value else ''

    # Bail out in case of name collision.
    # BUG FIX: *params* holds inspect.Parameter objects, so the previous
    # membership test ``reserved in params`` compared a str against Parameter
    # instances and could never be true (the guard was dead code). Compare
    # against the parameter *names* instead.
    param_names = {param.name for param in params}
    for reserved in ('gca', 'gci', 'gcf', '__ret'):
        if reserved in param_names:
            raise ValueError(
                f'Method {called_fullname} has kwarg named {reserved}')

    return template.format(
        name=name,
        called_name=called_name,
        signature=signature,
        call=call,
        return_statement=return_statement,
        **kwargs)
def boilerplate_gen():
    """Generator of lines for the automated part of pyplot.

    Yields, in order: wrappers for Figure methods, wrappers for Axes
    plotting methods (colormappable ones also get a ``sci(...)`` call),
    and finally one colormap-setter function per registered cmap name.
    """
    # Figure methods wrapped as-is; "pyplot_name:real_name" renames a method.
    _figure_commands = (
        'figimage',
        'figtext:text',
        'gca',
        'gci:_gci',
        'ginput',
        'subplots_adjust',
        'suptitle',
        'tight_layout',
        'waitforbuttonpress',
    )
    # These methods are all simple wrappers of Axes methods by the same name.
    _axes_commands = (
        'acorr',
        'angle_spectrum',
        'annotate',
        'arrow',
        'autoscale',
        'axhline',
        'axhspan',
        'axis',
        'axline',
        'axvline',
        'axvspan',
        'bar',
        'barbs',
        'barh',
        'bar_label',
        'boxplot',
        'broken_barh',
        'clabel',
        'cohere',
        'contour',
        'contourf',
        'csd',
        'ecdf',
        'errorbar',
        'eventplot',
        'fill',
        'fill_between',
        'fill_betweenx',
        'grid',
        'grouped_bar',
        'hexbin',
        'hist',
        'stairs',
        'hist2d',
        'hlines',
        'imshow',
        'legend',
        'locator_params',
        'loglog',
        'magnitude_spectrum',
        'margins',
        'minorticks_off',
        'minorticks_on',
        'pcolor',
        'pcolormesh',
        'phase_spectrum',
        'pie',
        'pie_label',
        'plot',
        'psd',
        'quiver',
        'quiverkey',
        'scatter',
        'semilogx',
        'semilogy',
        'specgram',
        'spy',
        'stackplot',
        'stem',
        'step',
        'streamplot',
        'table',
        'text',
        'tick_params',
        'ticklabel_format',
        'tricontour',
        'tricontourf',
        'tripcolor',
        'triplot',
        'violinplot',
        'vlines',
        'xcorr',
        # pyplot name : real name
        'sci:_sci',
        'title:set_title',
        'xlabel:set_xlabel',
        'ylabel:set_ylabel',
        'xscale:set_xscale',
        'yscale:set_yscale',
    )
    # Methods returning a colormappable: the generated wrapper runs the given
    # statement on the return value so the result becomes the "current image".
    cmappable = {
        'contour': (
            'if __ret._A is not None:  # type: ignore[attr-defined]\n'
            '        sci(__ret)'
        ),
        'contourf': (
            'if __ret._A is not None:  # type: ignore[attr-defined]\n'
            '        sci(__ret)'
        ),
        'hexbin': 'sci(__ret)',
        'scatter': 'sci(__ret)',
        'pcolor': 'sci(__ret)',
        'pcolormesh': 'sci(__ret)',
        'hist2d': 'sci(__ret[-1])',
        'imshow': 'sci(__ret)',
        'spy': (
            'if isinstance(__ret, _ColorizerInterface):\n'
            '        sci(__ret)'
        ),
        'quiver': 'sci(__ret)',
        'specgram': 'sci(__ret[-1])',
        'streamplot': 'sci(__ret.lines)',
        'tricontour': (
            'if __ret._A is not None:  # type: ignore[attr-defined]\n'
            '        sci(__ret)'
        ),
        'tricontourf': (
            'if __ret._A is not None:  # type: ignore[attr-defined]\n'
            '        sci(__ret)'
        ),
        'tripcolor': 'sci(__ret)',
    }

    for spec in _figure_commands:
        if ':' in spec:
            name, called_name = spec.split(':')
        else:
            name = called_name = spec
        yield generate_function(name, f'Figure.{called_name}',
                                FIGURE_METHOD_TEMPLATE)
    for spec in _axes_commands:
        if ':' in spec:
            name, called_name = spec.split(':')
        else:
            name = called_name = spec
        template = (AXES_CMAPPABLE_METHOD_TEMPLATE if name in cmappable else
                    AXES_METHOD_TEMPLATE)
        yield generate_function(name, f'Axes.{called_name}', template,
                                sci_command=cmappable.get(name))

    cmaps = (
        'autumn',
        'bone',
        'cool',
        'copper',
        'flag',
        'gray',
        'hot',
        'hsv',
        'jet',
        'pink',
        'prism',
        'spring',
        'summer',
        'winter',
        'magma',
        'inferno',
        'plasma',
        'viridis',
        "nipy_spectral"
    )
    # add all the colormaps (autumn, hsv, ....)
    for name in cmaps:
        yield AUTOGEN_MSG
        yield CMAP_TEMPLATE.format(name=name)
def build_pyplot(pyplot_path):
    """Regenerate the autogenerated tail of *pyplot_path*.

    Everything up to (and including) the line equal to PYPLOT_MAGIC_HEADER is
    kept; the rest is replaced by the output of boilerplate_gen(), then the
    file is reformatted with black.

    Raises
    ------
    ValueError
        If the magic header line is not present in the file.
    """
    pyplot_orig = pyplot_path.read_text().splitlines(keepends=True)
    try:
        pyplot_orig = pyplot_orig[:pyplot_orig.index(PYPLOT_MAGIC_HEADER) + 1]
    # BUG FIX: list.index raises ValueError (not IndexError) when the value is
    # absent, so the previous `except IndexError` could never trigger and the
    # helpful message below was unreachable.
    except ValueError as err:
        raise ValueError('The pyplot.py file *must* have the exact line: %s'
                         % PYPLOT_MAGIC_HEADER) from err
    with pyplot_path.open('w') as pyplot:
        pyplot.writelines(pyplot_orig)
        pyplot.writelines(boilerplate_gen())
    # Run black to autoformat pyplot
    subprocess.run(
        [sys.executable, "-m", "black", "--line-length=88", pyplot_path],
        check=True
    )
### Methods for retrieving signatures from pyi stub files
def get_ast_tree(cls):
    """Return the ast.ClassDef node for *cls*, preferring its ``.pyi`` stub.

    The stub file next to the class's source module is parsed when it exists,
    otherwise the source module itself.
    """
    source_path = Path(inspect.getfile(cls))
    stub = source_path.with_suffix(".pyi")
    if stub.exists():
        source_path = stub
    module = ast.parse(source_path.read_text())
    matches = (
        node for node in module.body
        if isinstance(node, ast.ClassDef) and node.name == cls.__name__
    )
    found = next(matches, None)
    if found is None:
        raise ValueError(f"Cannot find {cls.__name__} in ast")
    return found
@functools.lru_cache
def get_ast_mro_trees(cls):
    """Collect ast class nodes for *cls* and every non-builtin mro ancestor."""
    trees = []
    for base in cls.__mro__:
        if base.__module__ == "builtins":
            continue
        trees.append(get_ast_tree(base))
    return trees
def get_matching_signature(method, trees):
    """Return *method*'s signature, annotated from a matching ast node if any.

    Scans *trees* (ast class nodes, mro order) for a FunctionDef with the
    method's name and merges its annotations into the runtime signature.
    """
    sig = inspect.signature(method)
    wanted = method.__name__
    for tree in trees:
        for node in tree.body:
            if isinstance(node, ast.FunctionDef) and node.name == wanted:
                return update_sig_from_node(node, sig)
    # Some methods (stackplot, streamplot, table, tricontour, tricontourf,
    # tripcolor, triplot) are implemented outside the mro of Axes, so no
    # annotated node is found for them; fall back to the runtime signature.
    return sig
def update_sig_from_node(node, sig):
    """Merge annotations from ast FunctionDef *node* into Signature *sig*."""
    params = dict(sig.parameters)
    arg_spec = node.args
    every_arg = (
        *arg_spec.posonlyargs,
        *arg_spec.args,
        arg_spec.vararg,
        *arg_spec.kwonlyargs,
        arg_spec.kwarg,
    )
    for arg in every_arg:
        # vararg/kwarg slots may be None; unannotated args carry no info.
        if arg is None or arg.annotation is None:
            continue
        rendered = direct_repr(ast.unparse(arg.annotation))
        params[arg.arg] = params[arg.arg].replace(annotation=rendered)
    if node.returns is None:
        return inspect.Signature(params.values())
    return inspect.Signature(
        params.values(),
        return_annotation=direct_repr(ast.unparse(node.returns)),
    )
if __name__ == '__main__':
    # Write the matplotlib.pyplot file.
    if len(sys.argv) > 1:
        # Explicit target path given on the command line.
        pyplot_path = Path(sys.argv[1])
    else:
        # Default: the pyplot.py of the source checkout this script lives in.
        mpl_path = (Path(__file__).parent / ".." / "lib" / "matplotlib").resolve()
        pyplot_path = mpl_path / "pyplot.py"
        # Sanity check: the imported Axes/Figure must come from this checkout,
        # otherwise we would regenerate pyplot from a different install.
        for cls in [Axes, Figure]:
            if mpl_path not in Path(inspect.getfile(cls)).parents:
                raise RuntimeError(
                    f"{cls.__name__} import path is not {mpl_path}.\n"
                    "Please make sure your Matplotlib installation "
                    "is from the same source checkout as boilerplate.py"
                )
    build_pyplot(pyplot_path)
| direct_repr |
python | doocs__leetcode | solution/3200-3299/3282.Reach End of Array With Max Score/Solution.py | {
"start": 0,
"end": 190
} | class ____:
def findMaximumScore(self, nums: List[int]) -> int:
ans = mx = 0
for x in nums[:-1]:
mx = max(mx, x)
ans += mx
return ans
| Solution |
python | walkccc__LeetCode | solutions/3448. Count Substrings Divisible By Last Digit/3448.py | {
"start": 0,
"end": 538
} | class ____:
def countSubstrings(self, s: str) -> int:
ans = 0
# dp[i][num][rem] := the number of first `i` digits of s that have a
# remainder of `rem` when divided by `num`
dp = [[[0] * 10 for _ in range(10)] for _ in range(len(s) + 1)]
for i in range(1, len(s) + 1):
digit = int(s[i - 1])
for num in range(1, 10):
for rem in range(num):
dp[i][num][(rem * 10 + digit) % num] += dp[i - 1][num][rem]
dp[i][num][digit % num] += 1
ans += dp[i][digit][0]
return ans
| Solution |
python | django-haystack__django-haystack | test_haystack/core/models.py | {
"start": 789,
"end": 993
} | class ____(models.Model):
    # Free-form author name used as the model's string representation.
    author = models.CharField(max_length=255)
    # Note: the callable (not a fixed timestamp) is passed as the default,
    # so it is evaluated at each object creation.
    pub_date = models.DateTimeField(default=datetime.datetime.now)

    def __str__(self):
        # Display the model by its author field.
        return self.author
| AnotherMockModel |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 40589,
"end": 43840
} | class ____(FlavaPreTrainedModel):
config: FlavaMultimodalConfig
# This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints.
base_model_prefix = "flava.multimodal_model"
main_input_name = "hidden_states"
    def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config
        self.use_cls_token = self.config.use_cls_token
        if self.use_cls_token:
            # Learnable [CLS] embedding prepended to the multimodal sequence
            # in forward() when enabled.
            self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.encoder = FlavaEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None
        # Weight initialization / final processing must run after all
        # submodules have been created.
        self.post_init()
    @auto_docstring
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
            The concatenated hidden states of unimodal encoders.
        """
        # Fall back to the config defaults for any unset output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_length, _ = hidden_states.size()

        if self.use_cls_token:
            # Prepend the learned [CLS] token to every sequence in the batch;
            # the sequence length (and any default mask) must grow by one.
            cls_tokens = self.cls_token.expand(batch_size, -1, -1)
            hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
            seq_length += 1

        if attention_mask is None:
            # No mask supplied: attend to every position.
            attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)
        # Expand the 2D padding mask into the format the encoder expects.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, (batch_size, seq_length), hidden_states.device
        )

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            # Tuple output: (sequence, pooled, *extra encoder outputs).
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@auto_docstring
| FlavaMultimodalModel |
python | wandb__wandb | wandb/sdk/launch/agent/config.py | {
"start": 1843,
"end": 2592
} | class ____(BaseModel):
"""Configuration for the environment block."""
type: Optional[EnvironmentType] = Field(
None,
description="The type of environment to use.",
)
region: Optional[str] = Field(..., description="The region to use.")
class Config:
extra = "allow"
@root_validator(pre=True) # type: ignore
@classmethod
def check_extra_fields(cls, values: dict) -> dict:
"""Check for extra fields and print a warning."""
for key in values:
if key not in ["type", "region"]:
wandb.termwarn(
f"Unrecognized field {key} in environment block. Please check your config file."
)
return values
| EnvironmentConfig |
python | django__django | tests/admin_views/test_password_form.py | {
"start": 238,
"end": 5612
} | class ____(AdminSeleniumTestCase):
    available_apps = AdminSeleniumTestCase.available_apps

    def setUp(self):
        # Superuser used by admin_login() in each test below.
        self.superuser = User.objects.create_superuser(
            username="super",
            password="secret",
            email="super@example.com",
        )
    def test_add_new_user(self):
        """A user with no password can be added.

        Enabling/disabling the usable password field shows/hides the password
        fields when adding a user.
        """
        # Deferred imports: selenium is only available in selenium test runs.
        from selenium.common import NoSuchElementException
        from selenium.webdriver.common.by import By

        user_add_url = reverse("auth_test_admin:auth_user_add")
        self.admin_login(username="super", password="secret")
        self.selenium.get(self.live_server_url + user_add_url)

        pw_switch_on = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="usable_password"][value="true"]'
        )
        pw_switch_off = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="usable_password"][value="false"]'
        )
        password1 = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="password1"]'
        )
        password2 = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="password2"]'
        )

        # Default is to set a password on user creation.
        self.assertIs(pw_switch_on.is_selected(), True)
        self.assertIs(pw_switch_off.is_selected(), False)
        # The password fields are visible.
        self.assertIs(password1.is_displayed(), True)
        self.assertIs(password2.is_displayed(), True)

        # Click to disable password-based authentication.
        pw_switch_off.click()
        # Radio buttons are updated accordingly.
        self.assertIs(pw_switch_on.is_selected(), False)
        self.assertIs(pw_switch_off.is_selected(), True)
        # The password fields are hidden.
        self.assertIs(password1.is_displayed(), False)
        self.assertIs(password2.is_displayed(), False)
        # The warning message should not be shown.
        with self.assertRaises(NoSuchElementException):
            self.selenium.find_element(By.ID, "id_unusable_warning")
    def test_change_password_for_existing_user(self):
        """A user can have their password changed or unset.

        Enabling/disabling the usable password field shows/hides the password
        fields and the warning about password lost.
        """
        # Deferred import: selenium is only available in selenium test runs.
        from selenium.webdriver.common.by import By

        user = User.objects.create_user(
            username="ada", password="charles", email="ada@example.com"
        )
        user_url = reverse("auth_test_admin:auth_user_password_change", args=(user.pk,))
        self.admin_login(username="super", password="secret")
        self.selenium.get(self.live_server_url + user_url)

        pw_switch_on = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="usable_password"][value="true"]'
        )
        pw_switch_off = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="usable_password"][value="false"]'
        )
        password1 = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="password1"]'
        )
        password2 = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[name="password2"]'
        )
        submit_set = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[type="submit"].set-password'
        )
        submit_unset = self.selenium.find_element(
            By.CSS_SELECTOR, 'input[type="submit"].unset-password'
        )

        # By default password-based authentication is enabled.
        self.assertIs(pw_switch_on.is_selected(), True)
        self.assertIs(pw_switch_off.is_selected(), False)
        # The password fields are visible.
        self.assertIs(password1.is_displayed(), True)
        self.assertIs(password2.is_displayed(), True)
        # Only the set password submit button is visible.
        self.assertIs(submit_set.is_displayed(), True)
        self.assertIs(submit_unset.is_displayed(), False)

        # Click to disable password-based authentication.
        pw_switch_off.click()
        # Radio buttons are updated accordingly.
        self.assertIs(pw_switch_on.is_selected(), False)
        self.assertIs(pw_switch_off.is_selected(), True)
        # The password fields are hidden.
        self.assertIs(password1.is_displayed(), False)
        self.assertIs(password2.is_displayed(), False)
        # Only the unset password submit button is visible.
        self.assertIs(submit_unset.is_displayed(), True)
        self.assertIs(submit_set.is_displayed(), False)
        # The warning about password being lost is shown.
        warning = self.selenium.find_element(By.ID, "id_unusable_warning")
        self.assertIs(warning.is_displayed(), True)

        # Click to enable password-based authentication.
        pw_switch_on.click()
        # The warning disappears.
        self.assertIs(warning.is_displayed(), False)
        # The password fields are shown.
        self.assertIs(password1.is_displayed(), True)
        self.assertIs(password2.is_displayed(), True)
        # Only the set password submit button is visible.
        self.assertIs(submit_set.is_displayed(), True)
        self.assertIs(submit_unset.is_displayed(), False)
| SeleniumAuthTests |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_kms.py | {
"start": 1767,
"end": 5245
} | class ____:
    def setup_method(self):
        # Patch GoogleBaseHook.__init__ so constructing the hook does not
        # need real GCP credentials or an Airflow connection.
        with mock.patch(
            "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
            new=mock_init,
        ):
            self.kms_hook = CloudKMSHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.kms.CloudKMSHook.get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.kms.KeyManagementServiceClient")
def test_kms_client_creation(self, mock_client, mock_get_creds):
result = self.kms_hook.get_conn()
mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO)
assert mock_client.return_value == result
assert self.kms_hook._conn == result
@mock.patch("airflow.providers.google.cloud.hooks.kms.CloudKMSHook.get_conn")
def test_encrypt(self, mock_get_conn):
mock_get_conn.return_value.encrypt.return_value = RESPONSE
result = self.kms_hook.encrypt(TEST_KEY_ID, PLAINTEXT)
mock_get_conn.assert_called_once_with()
mock_get_conn.return_value.encrypt.assert_called_once_with(
request=dict(
name=TEST_KEY_ID,
plaintext=PLAINTEXT,
additional_authenticated_data=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert PLAINTEXT_b64 == result
@mock.patch("airflow.providers.google.cloud.hooks.kms.CloudKMSHook.get_conn")
def test_encrypt_with_auth_data(self, mock_get_conn):
mock_get_conn.return_value.encrypt.return_value = RESPONSE
result = self.kms_hook.encrypt(TEST_KEY_ID, PLAINTEXT, AUTH_DATA)
mock_get_conn.assert_called_once_with()
mock_get_conn.return_value.encrypt.assert_called_once_with(
request=dict(
name=TEST_KEY_ID,
plaintext=PLAINTEXT,
additional_authenticated_data=AUTH_DATA,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert PLAINTEXT_b64 == result
@mock.patch("airflow.providers.google.cloud.hooks.kms.CloudKMSHook.get_conn")
def test_decrypt(self, mock_get_conn):
mock_get_conn.return_value.decrypt.return_value = RESPONSE
result = self.kms_hook.decrypt(TEST_KEY_ID, CIPHERTEXT_b64)
mock_get_conn.assert_called_once_with()
mock_get_conn.return_value.decrypt.assert_called_once_with(
request=dict(
name=TEST_KEY_ID,
ciphertext=CIPHERTEXT,
additional_authenticated_data=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert result == PLAINTEXT
@mock.patch("airflow.providers.google.cloud.hooks.kms.CloudKMSHook.get_conn")
def test_decrypt_with_auth_data(self, mock_get_conn):
mock_get_conn.return_value.decrypt.return_value = RESPONSE
result = self.kms_hook.decrypt(TEST_KEY_ID, CIPHERTEXT_b64, AUTH_DATA)
mock_get_conn.assert_called_once_with()
mock_get_conn.return_value.decrypt.assert_called_once_with(
request=dict(
name=TEST_KEY_ID,
ciphertext=CIPHERTEXT,
additional_authenticated_data=AUTH_DATA,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
assert result == PLAINTEXT
| TestCloudKMSHook |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-fauna/source_fauna/source.py | {
"start": 1273,
"end": 1536
} | class ____:
    def __init__(self, conf):
        """Build collection settings from the connector config dict *conf*."""
        # The page size, used in all Paginate() calls.
        self.page_size = conf["page_size"]
        # Configs for how deletions are handled
        self.deletions = DeletionsConfig(conf["deletions"])
| CollectionConfig |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/file.py | {
"start": 3675,
"end": 6614
} | class ____(StrParameterItem):
    def __init__(self, param, depth):
        # Backing store for the full (un-elided) value; must exist before
        # super().__init__, which calls makeWidget() and wires it up.
        self._value = None
        super().__init__(param, depth)
        # Small "..." button that opens the file picker dialog.
        button = QtWidgets.QPushButton('...')
        button.setFixedWidth(25)
        button.setContentsMargins(0, 0, 0, 0)
        button.clicked.connect(self._retrieveFileSelection_gui)
        self.layoutWidget.layout().insertWidget(2, button)
        # Re-elide the displayed path whenever the label is resized.
        self.displayLabel.resizeEvent = self._newResizeEvent
        # self.layoutWidget.layout().insertWidget(3, self.defaultBtn)
    def makeWidget(self):
        w = super().makeWidget()
        # Route the widget's value accessors through this item so the full
        # path stored in self._value survives display elision.
        w.setValue = self.setValue
        w.value = self.value
        # Doesn't make much sense to have a 'changing' signal since filepaths should be complete before value
        # is emitted
        delattr(w, 'sigChanging')
        return w
    def _newResizeEvent(self, ev):
        # Run the label class's own resizeEvent (the instance attribute was
        # replaced in __init__), then refresh the elided text for the new width.
        ret = type(self.displayLabel).resizeEvent(self.displayLabel, ev)
        self.updateDisplayLabel()
        return ret
    def setValue(self, value):
        # Keep the full value; the line edit only shows its string form.
        self._value = value
        self.widget.setText(str(value))
    def value(self):
        # Return the stored value rather than the (possibly elided) text.
        return self._value
    def _retrieveFileSelection_gui(self):
        """Open a file picker seeded from the current value and apply the choice."""
        curVal = self.param.value() if self.param.hasValue() else None
        if isinstance(curVal, list) and len(curVal):
            # All files should be from the same directory, in principle
            # Since no mechanism exists for preselecting multiple, the most sensible
            # thing is to select nothing in the preview dialog
            curVal = curVal[0]
            if os.path.isfile(curVal):
                curVal = os.path.dirname(curVal)
        opts = self.param.opts.copy()
        # Start directory: current value, then the configured directory,
        # then the working directory.
        useDir = curVal or opts.get('directory') or os.getcwd()
        startDir = os.path.abspath(useDir)
        if os.path.isfile(startDir):
            # A file path was given: preselect the file, open its folder.
            opts['selectFile'] = os.path.basename(startDir)
            startDir = os.path.dirname(startDir)
        if os.path.exists(startDir):
            opts['directory'] = startDir
        if opts.get('windowTitle') is None:
            opts['windowTitle'] = self.param.title()
        # Empty result means the dialog was cancelled; leave the value alone.
        if fname := popupFilePicker(None, **opts):
            self.param.setValue(fname)
    def updateDefaultBtn(self):
        # Override since a readonly label should still allow reverting to default
        ## enable/disable default btn
        self.defaultBtn.setEnabled(
            not self.param.valueIsDefault() and self.param.opts['enabled'])
        # hide / show
        self.defaultBtn.setVisible(self.param.hasDefault())
    def updateDisplayLabel(self, value=None):
        """Show *value* (or the parameter value) elided to the label width."""
        lbl = self.displayLabel
        if value is None:
            value = self.param.value()
        value = str(value)
        font = lbl.font()
        metrics = QtGui.QFontMetricsF(font)
        # Elide from the left so the filename tail stays visible.
        value = metrics.elidedText(value, QtCore.Qt.TextElideMode.ElideLeft, lbl.width()-5)
        return super().updateDisplayLabel(value)
| FileParameterItem |
python | tensorflow__tensorflow | tensorflow/compiler/tests/xla_test.py | {
"start": 2832,
"end": 3731
} | class ____(session.Session):
"""Tensorflow session that runs tpu.rewrite() on ops on run()."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cached TPU topology; populated lazily on the first run() call.
        self.topology = None
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
from tensorflow.python.tpu import tpu # pylint: disable=g-import-not-at-top
if self.topology is None:
self.topology = super().run(tpu.initialize_system())
assert self.topology is not None
fetch_mapper = session._FetchMapper.for_fetch(fetches)
new_fetches = []
for fetch in fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
fetch = tpu.rewrite(lambda fetch=fetch: fetch)
new_fetches.append(fetch)
rewritten_fetches = fetch_mapper.build_results(new_fetches)
return super().run(rewritten_fetches, feed_dict, options, run_metadata)
| TPURewriteSession |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-aws-datalake/destination_aws_datalake/aws.py | {
"start": 3880,
"end": 11053
} | class ____:
def __init__(self, connector_config: ConnectorConfig, destination: Destination) -> None:
self._config: ConnectorConfig = connector_config
self._destination: Destination = destination
self._session: boto3.Session = None
self.create_session()
self.glue_client = self._session.client("glue")
self.s3_client = self._session.client("s3")
self.lf_client = self._session.client("lakeformation")
self._table_type = "GOVERNED" if self._config.lakeformation_governed_tables else "EXTERNAL_TABLE"
@retry(stop_max_attempt_number=10, wait_random_min=1000, wait_random_max=2000)
def create_session(self) -> None:
if self._config.credentials_type == CredentialsType.IAM_USER:
self._session = boto3.Session(
aws_access_key_id=self._config.aws_access_key,
aws_secret_access_key=self._config.aws_secret_key,
region_name=self._config.region,
)
elif self._config.credentials_type == CredentialsType.IAM_ROLE:
botocore_session = AssumeRoleProvider.assume_role_refreshable(
session=botocore.session.Session(), role_arn=self._config.role_arn, session_name="airbyte-destination-aws-datalake"
)
self._session = boto3.session.Session(region_name=self._config.region, botocore_session=botocore_session)
def _get_s3_path(self, database: str, table: str) -> str:
bucket = f"s3://{self._config.bucket_name}"
if self._config.bucket_prefix:
bucket += f"/{self._config.bucket_prefix}"
return f"{bucket}/{database}/{table}/"
def _get_compression_type(self, compression: CompressionCodec) -> Optional[str]:
if compression == CompressionCodec.GZIP:
return "gzip"
elif compression == CompressionCodec.SNAPPY:
return "snappy"
elif compression == CompressionCodec.ZSTD:
return "zstd"
else:
return None
def _write_parquet(
self,
df: pd.DataFrame,
path: str,
database: str,
table: str,
mode: str,
dtype: Optional[Dict[str, str]],
partition_cols: list = None,
) -> Any:
return wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=database,
table=table,
glue_table_settings={
"table_type": self._table_type,
},
mode=mode,
use_threads=False, # True causes s3 NoCredentialsError error
catalog_versioning=True,
boto3_session=self._session,
partition_cols=partition_cols,
compression=self._get_compression_type(self._config.compression_codec),
dtype=dtype,
)
def _write_json(
self,
df: pd.DataFrame,
path: str,
database: str,
table: str,
mode: str,
dtype: Optional[Dict[str, str]],
partition_cols: list = None,
) -> Any:
return wr.s3.to_json(
df=df,
path=path,
dataset=True,
database=database,
table=table,
glue_table_settings={
"table_type": self._table_type,
},
mode=mode,
use_threads=False, # True causes s3 NoCredentialsError error
orient="records",
lines=True,
catalog_versioning=True,
boto3_session=self._session,
partition_cols=partition_cols,
dtype=dtype,
compression=self._get_compression_type(self._config.compression_codec),
)
def _write(
self, df: pd.DataFrame, path: str, database: str, table: str, mode: str, dtype: Dict[str, str], partition_cols: list = None
) -> Any:
self._create_database_if_not_exists(database)
if self._config.format_type == OutputFormat.JSONL:
return self._write_json(df, path, database, table, mode, dtype, partition_cols)
elif self._config.format_type == OutputFormat.PARQUET:
return self._write_parquet(df, path, database, table, mode, dtype, partition_cols)
else:
raise Exception(f"Unsupported output format: {self._config.format_type}")
def _create_database_if_not_exists(self, database: str) -> None:
tag_key = self._config.lakeformation_database_default_tag_key
tag_values = self._config.lakeformation_database_default_tag_values
wr.catalog.create_database(name=database, boto3_session=self._session, exist_ok=True)
if tag_key and tag_values:
self.lf_client.add_lf_tags_to_resource(
Resource={
"Database": {"Name": database},
},
LFTags=[{"TagKey": tag_key, "TagValues": tag_values.split(",")}],
)
@retry(stop_max_attempt_number=10, wait_random_min=2000, wait_random_max=3000)
def head_bucket(self):
return self.s3_client.head_bucket(Bucket=self._config.bucket_name)
def table_exists(self, database: str, table: str) -> bool:
try:
self.glue_client.get_table(DatabaseName=database, Name=table)
return True
except ClientError:
return False
def delete_table(self, database: str, table: str) -> bool:
logger.info(f"Deleting table {database}.{table}")
return wr.catalog.delete_table_if_exists(database=database, table=table, boto3_session=self._session)
def delete_table_objects(self, database: str, table: str) -> None:
path = self._get_s3_path(database, table)
logger.info(f"Deleting objects in {path}")
return wr.s3.delete_objects(path=path, boto3_session=self._session)
def reset_table(self, database: str, table: str) -> None:
logger.info(f"Resetting table {database}.{table}")
if self.table_exists(database, table):
self.delete_table(database, table)
self.delete_table_objects(database, table)
def write(self, df: pd.DataFrame, database: str, table: str, dtype: Dict[str, str], partition_cols: list):
path = self._get_s3_path(database, table)
return self._write(
df,
path,
database,
table,
"overwrite",
dtype,
partition_cols,
)
def append(self, df: pd.DataFrame, database: str, table: str, dtype: Dict[str, str], partition_cols: list):
path = self._get_s3_path(database, table)
return self._write(
df,
path,
database,
table,
"append",
dtype,
partition_cols,
)
def upsert(self, df: pd.DataFrame, database: str, table: str, dtype: Dict[str, str], partition_cols: list):
path = self._get_s3_path(database, table)
return self._write(
df,
path,
database,
table,
"overwrite_partitions",
dtype,
partition_cols,
)
| AwsHandler |
python | pypa__hatch | tests/utils/test_runner.py | {
"start": 751,
"end": 4067
} | class ____:
def test_empty(self):
assert select_environments({}, {}, {}) == []
def test_no_filters(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {}, {}) == ["a", "b", "c", "d"]
def test_include_any(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {"version": set()}, {}) == ["d"]
def test_include_specific(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {"python": {"3.11"}}, {}) == ["c", "d"]
def test_include_multiple(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {"python": {"3.11"}, "feature": {"baz"}}, {}) == ["c"]
def test_exclude_any(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {}, {"version": set()}) == ["a", "b", "c"]
def test_exclude_specific(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {}, {"python": {"3.11"}}) == ["a", "b"]
def test_exclude_multiple(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {}, {"python": {"3.11"}, "feature": {"baz"}}) == ["a", "b"]
def test_include_and_exclude(self):
environments = {
"a": {"python": "3.9", "feature": "foo"},
"b": {"python": "3.10", "feature": "bar"},
"c": {"python": "3.11", "feature": "baz"},
"d": {"python": "3.11", "feature": "foo", "version": "42"},
}
assert select_environments(environments, {"python": {"3.11"}}, {"feature": {"baz"}}) == ["d"]
| TestSelectEnvironments |
python | pydantic__pydantic | pydantic-core/tests/validators/test_dataclasses.py | {
"start": 41263,
"end": 46844
} | class ____:
foo: Optional[FooDataclass]
def test_custom_dataclass_names():
# Note: normally you would use the same values for DataclassArgsSchema.dataclass_name and DataclassSchema.cls_name,
# but I have purposely made them different here to show which parts of the errors are affected by which.
# I have used square brackets in the names to hint that the most likely reason for using a value different from
# cls.__name__ is for use with generic types.
schema = core_schema.dataclass_schema(
FooParentDataclass,
core_schema.dataclass_args_schema(
'FooParentDataclass',
[
core_schema.dataclass_field(
name='foo',
schema=core_schema.union_schema(
[
core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass[dataclass_args_schema]',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
),
['a', 'b'],
cls_name='FooDataclass[cls_name]',
),
core_schema.none_schema(),
]
),
)
],
),
['foo'],
)
v = SchemaValidator(schema)
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'foo': 123})
assert exc_info.value.errors(include_url=False) == [
{
'ctx': {'class_name': 'FooDataclass[dataclass_args_schema]'},
'input': 123,
'loc': ('foo', 'FooDataclass[cls_name]'),
'msg': 'Input should be a dictionary or an instance of FooDataclass[dataclass_args_schema]',
'type': 'dataclass_type',
},
{'input': 123, 'loc': ('foo', 'none'), 'msg': 'Input should be None', 'type': 'none_required'},
]
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python >= 3.10')
def test_slots() -> None:
@dataclasses.dataclass(slots=True)
class Model:
x: int
schema = core_schema.dataclass_schema(
Model,
core_schema.dataclass_args_schema(
'Model', [core_schema.dataclass_field(name='x', schema=core_schema.int_schema())]
),
['x'],
slots=True,
)
val = SchemaValidator(schema)
m: Model
m = val.validate_python({'x': 123})
assert m == Model(x=123)
with pytest.raises(ValidationError):
val.validate_python({'x': 'abc'})
val.validate_assignment(m, 'x', 456)
assert m.x == 456
with pytest.raises(ValidationError):
val.validate_assignment(m, 'x', 'abc')
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python >= 3.10')
def test_dataclass_slots_field_before_validator():
@dataclasses.dataclass(slots=True)
class Foo:
a: int
b: str
@classmethod
def validate_b(cls, v: bytes, info: core_schema.ValidationInfo) -> bytes:
assert v == b'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return b'hello world!'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b',
schema=core_schema.with_info_before_validator_function(Foo.validate_b, core_schema.str_schema()),
),
],
),
['a', 'b'],
slots=True,
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python >= 3.10')
def test_dataclass_slots_field_after_validator():
@dataclasses.dataclass(slots=True)
class Foo:
a: int
b: str
@classmethod
def validate_b(cls, v: str, info: core_schema.ValidationInfo) -> str:
assert v == 'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return 'hello world!'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b',
schema=core_schema.with_info_after_validator_function(Foo.validate_b, core_schema.str_schema()),
),
],
),
['a', 'b'],
slots=True,
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
if sys.version_info < (3, 10):
kwargs = {}
else:
kwargs = {'slots': True}
@dataclasses.dataclass(**kwargs)
| FooParentDataclass |
python | RaRe-Technologies__gensim | gensim/test/test_lee.py | {
"start": 1171,
"end": 4160
} | class ____(unittest.TestCase):
def setUp(self):
"""setup lee test corpora"""
global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2
bg_corpus_file = datapath('lee_background.cor')
corpus_file = datapath('lee.cor')
sim_file = datapath('similarities0-1.txt')
# read in the corpora
latin1 = partial(utils.to_unicode, encoding='latin1')
with utils.open(bg_corpus_file, 'rb') as f:
bg_corpus = preprocess_documents(latin1(line) for line in f)
with utils.open(corpus_file, 'rb') as f:
corpus = preprocess_documents(latin1(line) for line in f)
with utils.open(bg_corpus_file, 'rb') as f:
bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
with utils.open(corpus_file, 'rb') as f:
corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
# read the human similarity data
sim_matrix = np.loadtxt(sim_file)
sim_m_size = np.shape(sim_matrix)[0]
human_sim_vector = sim_matrix[np.triu_indices(sim_m_size, 1)]
def test_corpus(self):
"""availability and integrity of corpus"""
documents_in_bg_corpus = 300
documents_in_corpus = 50
len_sim_vector = 1225
self.assertEqual(len(bg_corpus), documents_in_bg_corpus)
self.assertEqual(len(corpus), documents_in_corpus)
self.assertEqual(len(human_sim_vector), len_sim_vector)
def test_lee(self):
"""correlation with human data > 0.6
(this is the value which was achieved in the original paper)
"""
global bg_corpus, corpus
# create a dictionary and corpus (bag of words)
dictionary = corpora.Dictionary(bg_corpus)
bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]
corpus = [dictionary.doc2bow(text) for text in corpus]
# transform the bag of words with log_entropy normalization
log_ent = models.LogEntropyModel(bg_corpus)
bg_corpus_ent = log_ent[bg_corpus]
# initialize an LSI transformation from background corpus
lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)
# transform small corpus to lsi bow->log_ent->fold-in-lsi
corpus_lsi = lsi[log_ent[corpus]]
# compute pairwise similarity matrix and extract upper triangular
res = np.zeros((len(corpus), len(corpus)))
for i, par1 in enumerate(corpus_lsi):
for j, par2 in enumerate(corpus_lsi):
res[i, j] = matutils.cossim(par1, par2)
flat = res[np.triu_indices(len(corpus), 1)]
cor = np.corrcoef(flat, human_sim_vector)[0, 1]
logging.info("LSI correlation coefficient is %s", cor)
self.assertTrue(cor > 0.6)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestLeeTest |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 7174,
"end": 7289
} | class ____(Message):
message = "'raise NotImplemented' should be 'raise NotImplementedError'"
| RaiseNotImplemented |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/events.py | {
"start": 28703,
"end": 31881
} | class ____(
NamedTuple(
"_ObjectStoreOperation",
[
("op", ObjectStoreOperationType),
("key", str),
("dest_key", Optional[str]),
("obj", Any),
("serialization_strategy_name", Optional[str]),
("object_store_name", Optional[str]),
("value_name", Optional[str]),
("version", Optional[str]),
("mapping_key", Optional[str]),
],
)
):
"""This event is used internally by Dagster machinery when values are written to and read from
an ObjectStore.
Users should not import this class or yield events of this type from user code.
Args:
op (ObjectStoreOperationType): The type of the operation on the object store.
key (str): The key of the object on which the operation was performed.
dest_key (Optional[str]): The destination key, if any, to which the object was copied.
obj (Any): The object, if any, retrieved by the operation.
serialization_strategy_name (Optional[str]): The name of the serialization strategy, if any,
employed by the operation
object_store_name (Optional[str]): The name of the object store that performed the
operation.
value_name (Optional[str]): The name of the input/output
version (Optional[str]): The version of the stored data.
mapping_key (Optional[str]): The mapping key when a dynamic output is used.
"""
def __new__(
cls,
op: ObjectStoreOperationType,
key: str,
dest_key: Optional[str] = None,
obj: Any = None,
serialization_strategy_name: Optional[str] = None,
object_store_name: Optional[str] = None,
value_name: Optional[str] = None,
version: Optional[str] = None,
mapping_key: Optional[str] = None,
):
return super().__new__(
cls,
op=op,
key=check.str_param(key, "key"),
dest_key=check.opt_str_param(dest_key, "dest_key"),
obj=obj,
serialization_strategy_name=check.opt_str_param(
serialization_strategy_name, "serialization_strategy_name"
),
object_store_name=check.opt_str_param(object_store_name, "object_store_name"),
value_name=check.opt_str_param(value_name, "value_name"),
version=check.opt_str_param(version, "version"),
mapping_key=check.opt_str_param(mapping_key, "mapping_key"),
)
@classmethod
def serializable(cls, inst, **kwargs):
return cls(
**dict(
{ # pyright: ignore[reportArgumentType]
"op": inst.op.value,
"key": inst.key,
"dest_key": inst.dest_key,
"obj": None,
"serialization_strategy_name": inst.serialization_strategy_name,
"object_store_name": inst.object_store_name,
"value_name": inst.value_name,
"version": inst.version,
},
**kwargs,
)
)
| ObjectStoreOperation |
python | django__django | tests/migrations/test_migrations_squashed_double/0003_squashed_0001_and_0002.py | {
"start": 43,
"end": 641
} | class ____(migrations.Migration):
replaces = [("migrations", "0001_initial"), ("migrations", "0002_auto")]
operations = [
migrations.CreateModel(
name="A",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("foo", models.BooleanField(default=True)),
],
),
]
| Migration |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_cloud_composer.py | {
"start": 9197,
"end": 16203
} | class ____:
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_wait_ready(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = TEST_GET_TASK_INSTANCES_RESULT(
"success",
"execution_date" if composer_airflow_version < 3 else "logical_date",
TEST_COMPOSER_EXTERNAL_TASK_ID,
)
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_wait_not_ready(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = TEST_GET_TASK_INSTANCES_RESULT(
"running",
"execution_date" if composer_airflow_version < 3 else "logical_date",
TEST_COMPOSER_EXTERNAL_TASK_ID,
)
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert not task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_task_instances_empty(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = {
"task_instances": [],
"total_entries": 0,
}
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert not task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_composer_external_task_id_wait_ready(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = TEST_GET_TASK_INSTANCES_RESULT(
"success",
"execution_date" if composer_airflow_version < 3 else "logical_date",
TEST_COMPOSER_EXTERNAL_TASK_ID,
)
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
composer_external_task_id=TEST_COMPOSER_EXTERNAL_TASK_ID,
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_composer_external_task_id_wait_not_ready(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = TEST_GET_TASK_INSTANCES_RESULT(
"running",
"execution_date" if composer_airflow_version < 3 else "logical_date",
TEST_COMPOSER_EXTERNAL_TASK_ID,
)
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
composer_external_task_id=TEST_COMPOSER_EXTERNAL_TASK_ID,
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert not task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_composer_external_task_group_id_wait_ready(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = TEST_GET_TASK_INSTANCES_RESULT(
"success",
"execution_date" if composer_airflow_version < 3 else "logical_date",
f"{TEST_COMPOSER_EXTERNAL_TASK_GROUP_ID}.{TEST_COMPOSER_EXTERNAL_TASK_ID}",
)
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
composer_external_task_group_id=TEST_COMPOSER_EXTERNAL_TASK_GROUP_ID,
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
@pytest.mark.parametrize("composer_airflow_version", [2, 3])
@mock.patch("airflow.providers.google.cloud.sensors.cloud_composer.CloudComposerHook")
def test_composer_external_task_group_id_wait_not_ready(self, mock_hook, composer_airflow_version):
mock_hook.return_value.get_task_instances.return_value = TEST_GET_TASK_INSTANCES_RESULT(
"running",
"execution_date" if composer_airflow_version < 3 else "logical_date",
f"{TEST_COMPOSER_EXTERNAL_TASK_GROUP_ID}.{TEST_COMPOSER_EXTERNAL_TASK_ID}",
)
task = CloudComposerExternalTaskSensor(
task_id="task-id",
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
environment_id=TEST_ENVIRONMENT_ID,
composer_external_dag_id="test_dag_id",
composer_external_task_group_id=TEST_COMPOSER_EXTERNAL_TASK_GROUP_ID,
allowed_states=["success"],
)
task._composer_airflow_version = composer_airflow_version
assert not task.poke(context={"logical_date": datetime(2024, 5, 23, 0, 0, 0)})
| TestCloudComposerExternalTaskSensor |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 2318,
"end": 2495
} | class ____(Greeter, Overridden, enum.Enum):
"""this is enum class"""
x = 'x'
def override(self):
"""overridden"""
return 2
| EnumClassWithMixinEnumType |
python | getsentry__sentry | src/sentry/monitors/models.py | {
"start": 26009,
"end": 27786
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
monitor = FlexibleForeignKey("monitors.Monitor")
monitor_environment = FlexibleForeignKey("monitors.MonitorEnvironment")
starting_checkin = FlexibleForeignKey(
"monitors.MonitorCheckIn", null=True, related_name="created_incidents"
)
starting_timestamp = models.DateTimeField(null=True)
"""
This represents the first failed check-in that we receive
"""
resolving_checkin = FlexibleForeignKey(
"monitors.MonitorCheckIn", null=True, related_name="resolved_incidents"
)
resolving_timestamp = models.DateTimeField(null=True)
"""
This represents the final OK check-in that we receive
"""
grouphash = models.CharField(max_length=32, default=default_grouphash)
"""
Used for issue occurrences generation. Failed check-ins produce an
occurrence associated to this grouphash.
"""
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "monitors"
db_table = "sentry_monitorincident"
indexes = [
models.Index(fields=["monitor_environment", "resolving_checkin"]),
models.Index(
fields=["starting_timestamp"],
name="active_incident_idx",
condition=Q(resolving_checkin__isnull=True),
),
]
constraints = [
# Only allow for one active incident (no resolved check-in) per
# monitor environment
models.UniqueConstraint(
fields=["monitor_environment_id"],
name="unique_active_incident",
condition=Q(resolving_checkin__isnull=True),
),
]
@region_silo_model
| MonitorIncident |
python | getsentry__sentry | tests/sentry/ratelimits/utils/test_get_rate_limit_value.py | {
"start": 332,
"end": 4020
} | class ____(TestCase):
def test_default_rate_limit_values(self) -> None:
"""Ensure that the default rate limits are called for endpoints without overrides."""
class TestEndpoint(Endpoint):
pass
_test_endpoint = TestEndpoint.as_view()
rate_limit_config = get_rate_limit_config(_test_endpoint.view_class)
assert get_rate_limit_value(
"GET", RateLimitCategory.IP, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.IP)
assert get_rate_limit_value(
"POST", RateLimitCategory.ORGANIZATION, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.ORGANIZATION)
assert get_rate_limit_value(
"DELETE", RateLimitCategory.USER, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.USER)
def test_cli_group_rate_limit_values(self) -> None:
"""Ensure that the CLI Group has the correct rate limit defaults set"""
class TestEndpoint(Endpoint):
rate_limits = RateLimitConfig(group="CLI")
_test_endpoint = TestEndpoint.as_view()
rate_limit_config = get_rate_limit_config(_test_endpoint.view_class)
assert get_rate_limit_value(
"GET", RateLimitCategory.IP, rate_limit_config
) == get_default_rate_limits_for_group("CLI", RateLimitCategory.IP)
assert get_rate_limit_value(
"POST", RateLimitCategory.ORGANIZATION, rate_limit_config
) == get_default_rate_limits_for_group("CLI", RateLimitCategory.ORGANIZATION)
assert get_rate_limit_value(
"DELETE", RateLimitCategory.USER, rate_limit_config
) == get_default_rate_limits_for_group("CLI", RateLimitCategory.USER)
def test_override_rate_limit(self) -> None:
"""Override one or more of the default rate limits."""
class TestEndpoint(Endpoint):
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {RateLimitCategory.IP: RateLimit(limit=100, window=5)},
"POST": {RateLimitCategory.USER: RateLimit(limit=20, window=4)},
}
)
_test_endpoint = TestEndpoint.as_view()
rate_limit_config = get_rate_limit_config(_test_endpoint.view_class)
assert get_rate_limit_value("GET", RateLimitCategory.IP, rate_limit_config) == RateLimit(
100, 5
)
assert get_rate_limit_value(
"GET", RateLimitCategory.USER, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.USER)
assert get_rate_limit_value(
"POST", RateLimitCategory.IP, rate_limit_config
) == get_default_rate_limits_for_group("default", RateLimitCategory.IP)
assert get_rate_limit_value("POST", RateLimitCategory.USER, rate_limit_config) == RateLimit(
20, 4
)
def test_inherit(self) -> None:
class ParentEndpoint(Endpoint):
rate_limits = RateLimitConfig(
group="foo",
limit_overrides={"GET": {RateLimitCategory.IP: RateLimit(limit=100, window=5)}},
)
class ChildEndpoint(ParentEndpoint):
rate_limits = RateLimitConfig(group="foo", limit_overrides={"GET": {}})
_child_endpoint = ChildEndpoint.as_view()
rate_limit_config = get_rate_limit_config(_child_endpoint.view_class)
assert get_rate_limit_value(
"GET", RateLimitCategory.IP, rate_limit_config
) == get_default_rate_limits_for_group("foo", RateLimitCategory.IP)
| TestGetRateLimitValue |
python | ipython__ipython | IPython/utils/tempdir.py | {
"start": 1264,
"end": 1852
} | class ____(TemporaryDirectory):
"""
Creates a temporary directory and sets the cwd to that directory.
Automatically reverts to previous cwd upon cleanup.
Usage example:
with TemporaryWorkingDirectory() as tmpdir:
...
"""
def __enter__(self):
self.old_wd = Path.cwd()
_os.chdir(self.name)
return super(TemporaryWorkingDirectory, self).__enter__()
def __exit__(self, exc, value, tb):
_os.chdir(self.old_wd)
return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
| TemporaryWorkingDirectory |
python | Lightning-AI__lightning | src/lightning/pytorch/callbacks/pruning.py | {
"start": 1995,
"end": 2078
} | class ____(TypedDict):
data: nn.Module
names: list[tuple[int, str]]
| _LayerRef |
python | facebook__pyre-check | tools/upgrade/configuration.py | {
"start": 600,
"end": 12881
} | class ____:
def __init__(
self, path: Path, json_contents: Optional[Dict[str, Any]] = None
) -> None:
if json_contents is None:
with open(path, "r") as configuration_file:
json_contents = json.load(configuration_file)
self._path: Path = path
if path.name == ".pyre_configuration.local":
self.is_local: bool = True
else:
self.is_local: bool = False
self.root: str = str(path.parent)
self.original_contents: Dict[str, Any] = json_contents
# Configuration fields
self.strict: Optional[bool] = json_contents.get("strict")
self.targets: Optional[List[str]] = json_contents.get("targets")
self.source_directories: Optional[List[str]] = json_contents.get(
"source_directories"
)
self.version: Optional[str] = json_contents.get("version")
self.pysa_version: Optional[str] = json_contents.get("pysa_version")
self.use_buck_builder: Optional[bool] = json_contents.get("use_buck_builder")
self.use_buck_source_database: Optional[bool] = json_contents.get(
"use_buck_source_database"
)
self.ignore_all_errors: Optional[List[str]] = json_contents.get(
"ignore_all_errors"
)
self.exclude: Optional[List[str]] = json_contents.get("exclude")
self.use_buck2: Optional[bool] = json_contents.get("use_buck2")
def get_contents(self) -> Dict[str, Any]:
"""Assumption: The field names in this class match the key names in
the configuration."""
contents: Dict[str, Any] = self.original_contents
def update_contents(key: str) -> None:
attribute = getattr(self, key)
if attribute is not None:
contents[key] = attribute
elif key in contents:
del contents[key]
update_contents("targets")
update_contents("source_directories")
update_contents("version")
update_contents("pysa_version")
update_contents("strict")
update_contents("use_buck_builder")
update_contents("use_buck_source_database")
update_contents("use_buck2")
return contents
@staticmethod
def find_parent_file(
filename: str, directory: Optional[Path] = None
) -> Optional[Path]:
directory = directory or Path.cwd()
root = directory.root
while directory != root:
configuration_path = directory / filename
if configuration_path.is_file():
return configuration_path
parent = directory.parent
if directory == parent:
return None
directory = parent
return None
@staticmethod
def find_project_configuration(directory: Optional[Path] = None) -> Path:
path = Configuration.find_parent_file(".pyre_configuration", directory)
if path is None:
raise UserError("No root with a `.pyre_configuration` found.")
return path
@staticmethod
def find_local_configuration(directory: Optional[Path] = None) -> Optional[Path]:
return Configuration.find_parent_file(".pyre_configuration.local", directory)
@staticmethod
def gather_local_configuration_paths(directory: str) -> Sequence[Path]:
return [
Path(path)
for path in get_filesystem().list(
directory, patterns=[r"**\.pyre_configuration.local"]
)
]
@staticmethod
def gather_local_configurations() -> List["Configuration"]:
LOG.info("Finding configurations...")
configuration_paths = Configuration.gather_local_configuration_paths(".")
if not configuration_paths:
LOG.info("No projects with local configurations found.")
return []
configurations = []
for configuration_path in configuration_paths:
with open(configuration_path) as configuration_file:
try:
configuration = Configuration(
configuration_path, json.load(configuration_file)
)
configurations.append(configuration)
except json.decoder.JSONDecodeError:
LOG.error(
"Configuration at `%s` is invalid, skipping.",
configuration_path,
)
LOG.info(
"Found %d local configuration%s.",
len(configurations),
"s" if len(configurations) != 1 else "",
)
return configurations
def get_path(self) -> Path:
return self._path
def get_source_paths(self) -> Generator[Path, None, None]:
# This is an approximation
return Path(self.root).glob("**/*.py")
def get_exclude_as_patterns(self) -> Set[re.Pattern[str]]:
if self.exclude is not None:
return {re.compile(pattern) for pattern in self.exclude}
else:
return set()
def _relative_path_from_prefix(self, prefix: str) -> Path:
root = Path(self.root)
if not prefix.startswith("//"):
return root / prefix
# strip preceding '//'
stripped_prefix = prefix[2:]
if self.is_local:
global_root = self.find_project_configuration(root).parent
return global_root / stripped_prefix
else:
return root / stripped_prefix
def get_resolved_ignore_path_prefixes(self) -> Set[Path]:
if self.ignore_all_errors is not None:
return {
self._relative_path_from_prefix(prefix).resolve()
for prefix in self.ignore_all_errors
}
else:
return set()
def get_directory(self) -> Path:
return self._path.parent
def write(self) -> None:
with open(self._path, "w") as configuration_file:
json.dump(self.get_contents(), configuration_file, sort_keys=True, indent=2)
configuration_file.write("\n")
def remove_version(self) -> None:
if not self.version:
LOG.info("Version not found in configuration.")
return
self.version = None
def delete(self) -> None:
self._path.unlink()
def set_version(self, version: str) -> None:
self.version = version
def set_pysa_version(self, pysa_version: str) -> None:
self.pysa_version = pysa_version
def enable_source_database_buck_builder(self) -> None:
self.use_buck_builder = True
self.use_buck_source_database = True
def set_use_buck1_if_possible(self) -> None:
if self.use_buck2 is None:
self.use_buck2 = False
def add_strict(self) -> None:
if self.strict:
LOG.info("Configuration is already strict.")
return
self.strict = True
# Pyre is now strict by default, so in most cases we want to
# remove the strict flag from the config file.
def use_strict_default(self) -> None:
self.strict = None
def add_targets(self, targets: List[str]) -> None:
existing_targets = self.targets
if existing_targets:
existing_targets.extend(targets)
else:
self.targets = targets
def has_single_wildcard_target(self) -> bool:
return (
self.targets is not None
and len(self.targets) == 1
and any(t.endswith("...") for t in self.targets)
)
def deduplicate_targets(self) -> None:
targets = self.targets
if not targets:
return
glob_targets = [target for target in targets if target.endswith("/...")]
non_glob_targets = [target for target in targets if not target.endswith("/...")]
all_targets = sorted(set(glob_targets)) + sorted(set(non_glob_targets))
deduplicated_targets = []
expanded_targets = set()
for target in all_targets:
if target.endswith("/...") or target.endswith(":"):
try:
expanded = (
subprocess.check_output(["buck2", "query", target])
.decode()
.strip()
.split("\n")
)
if not all(target in expanded_targets for target in expanded):
expanded_targets.update(expanded)
deduplicated_targets.append(target)
except (FileNotFoundError, subprocess.CalledProcessError) as error:
LOG.warning("Failed to query target: %s\n%s", target, str(error))
deduplicated_targets.append(target)
elif target not in expanded_targets:
expanded_targets.add(target)
deduplicated_targets.append(target)
deduplicated_targets.sort(key=lambda target: targets.index(target))
self.targets = deduplicated_targets
def run_pyre(
self,
arguments: List[str],
description: str,
should_clean: bool,
command_input: Optional[str],
stderr_flag: "subprocess._FILE" = subprocess.PIPE,
) -> Optional["subprocess.CompletedProcess[str]"]:
if should_clean:
try:
# If building targets, run clean or space may run out on device!
LOG.info("Running `buck clean`...")
subprocess.call(["buck", "clean"], timeout=200)
except subprocess.TimeoutExpired:
LOG.warning("Buck timed out. Try running `buck kill` before retrying.")
return None
except subprocess.CalledProcessError as error:
LOG.warning("Error calling `buck clean`: %s", str(error))
return None
try:
LOG.info("%s", description)
return subprocess.run(
["pyre", *arguments],
stdout=subprocess.PIPE,
stderr=stderr_flag,
text=True,
input=command_input,
)
except subprocess.CalledProcessError as error:
LOG.warning("Error calling pyre: %s", str(error))
return None
def get_nested_configuration_paths(self) -> Set[Path]:
filesystem = get_filesystem()
nested_configurations = filesystem.list(
root=self.root, patterns=["**/.pyre_configuration.local"]
)
filtered_nested_configurations = {
path
for nested_configuration in nested_configurations
if (path := Path(self.root, nested_configuration)) != self.get_path()
}
LOG.info(f"Found {len(filtered_nested_configurations)} nested configurations")
LOG.debug(f"Nested configurations found: {filtered_nested_configurations}")
return filtered_nested_configurations
def get_errors(
self,
only_fix_error_code: Optional[int] = None,
should_clean: bool = True,
command_input: Optional[str] = None,
strict: bool = False,
) -> Errors:
local_root_arguments = (
["--local-configuration", self.root] if self.is_local else []
)
strict_arguments = ["--strict"] if strict else []
arguments = [*strict_arguments, *local_root_arguments, "--output=json", "check"]
pyre_output = self.run_pyre(
arguments=arguments,
description=f"Checking `{self.root}`...",
should_clean=self.targets is not None and should_clean,
command_input=command_input,
)
if not pyre_output:
return Errors.empty()
stdout = pyre_output.stdout
if stdout is None:
return Errors.empty()
stdout = stdout.strip()
try:
errors = Errors.from_json(stdout, only_fix_error_code)
except UserError as error:
LOG.info("Error when parsing Pyre error output.")
LOG.info(f"Pyre stdout: {stdout}\nPyre stderr: {pyre_output.stderr}")
raise error
LOG.info("Found %d error%s.", len(errors), "s" if len(errors) != 1 else "")
return errors
| Configuration |
python | getsentry__sentry | src/sentry/interfaces/security.py | {
"start": 2563,
"end": 3289
} | class ____(SecurityReport):
"""
An OCSP Stapling violation report
See: https://docs.google.com/document/d/1aISglJIIwglcOAhqNfK-2vtQl-_dWAapc-VLDh-9-BE
>>> {
>>> "date-time": date-time,
>>> "hostname": hostname,
>>> "port": port,
>>> "effective-expiration-date": date-time,
>>> "response-status": ResponseStatus,
>>> "ocsp-response": ocsp,
>>> "cert-status": CertStatus,
>>> "served-certificate-chain": [pem1, ... pemN],(MUST be in the order served)
>>> "validated-certificate-chain": [pem1, ... pemN](MUST be in the order served)
>>> }
"""
score = 1300
display_score = 1300
title = "Expect-Staple Report"
| ExpectStaple |
python | has2k1__plotnine | plotnine/facets/strips.py | {
"start": 4552,
"end": 5688
} | class ____(List[strip]):
"""
List of strips for a plot
"""
facet: facet
@staticmethod
def from_facet(facet: facet) -> Strips:
new = Strips()
new.facet = facet
new.setup()
return new
@property
def axs(self) -> list[Axes]:
return self.facet.axs
@property
def layout(self) -> Layout:
return self.facet.layout
@property
def theme(self) -> theme:
return self.facet.theme
@property
def top_strips(self) -> Strips:
return Strips([s for s in self if s.position == "top"])
@property
def right_strips(self) -> Strips:
return Strips([s for s in self if s.position == "right"])
def draw(self):
for s in self:
s.draw()
def setup(self) -> Self:
"""
Calculate the box information for all strips
It is stored in self.strip_info
"""
for layout_info in self.layout.get_details():
ax = self.axs[layout_info.panel_index]
lst = self.facet.make_strips(layout_info, ax)
self.extend(lst)
return self
| Strips |
python | joblib__joblib | joblib/_memmapping_reducer.py | {
"start": 21690,
"end": 28553
} | class ____(object):
"""Stateful object able to manage temporary folder and pickles
It exposes:
- a per-context folder name resolving API that memmap-based reducers will
rely on to know where to pickle the temporary memmaps
- a temporary file/folder management API that internally uses the
resource_tracker.
"""
def __init__(self, temp_folder_root=None, context_id=None):
self._current_temp_folder = None
self._temp_folder_root = temp_folder_root
self._use_shared_mem = None
self._cached_temp_folders = dict()
self._id = uuid4().hex
self._finalizers = {}
if context_id is None:
# It would be safer to not assign a default context id (less silent
# bugs), but doing this while maintaining backward compatibility
# with the previous, context-unaware version get_memmaping_executor
# exposes too many low-level details.
context_id = uuid4().hex
self.set_current_context(context_id)
def set_current_context(self, context_id):
self._current_context_id = context_id
self.register_new_context(context_id)
def register_new_context(self, context_id):
# Prepare a sub-folder name specific to a context (usually a unique id
# generated by each instance of the Parallel class). Do not create in
# advance to spare FS write access if no array is to be dumped).
if context_id in self._cached_temp_folders:
return
else:
# During its lifecycle, one Parallel object can have several
# executors associated to it (for instance, if a loky worker raises
# an exception, joblib shutdowns the executor and instantly
# recreates a new one before raising the error - see
# ``ensure_ready``. Because we don't want two executors tied to
# the same Parallel object (and thus the same context id) to
# register/use/delete the same folder, we also add an id specific
# to the current Manager (and thus specific to its associated
# executor) to the folder name.
new_folder_name = "joblib_memmapping_folder_{}_{}_{}".format(
os.getpid(), self._id, context_id
)
new_folder_path, _ = _get_temp_dir(new_folder_name, self._temp_folder_root)
self.register_folder_finalizer(new_folder_path, context_id)
self._cached_temp_folders[context_id] = new_folder_path
def resolve_temp_folder_name(self):
"""Return a folder name specific to the currently activated context"""
return self._cached_temp_folders[self._current_context_id]
# resource management API
def register_folder_finalizer(self, pool_subfolder, context_id):
# Register the garbage collector at program exit in case caller forgets
# to call terminate explicitly: note we do not pass any reference to
# ensure that this callback won't prevent garbage collection of
# parallel instance and related file handler resources such as POSIX
# semaphores and pipes
pool_module_name = whichmodule(delete_folder, "delete_folder")
resource_tracker.register(pool_subfolder, "folder")
def _cleanup():
# In some cases the Python runtime seems to set delete_folder to
# None just before exiting when accessing the delete_folder
# function from the closure namespace. So instead we reimport
# the delete_folder function explicitly.
# https://github.com/joblib/joblib/issues/328
# We cannot just use from 'joblib.pool import delete_folder'
# because joblib should only use relative imports to allow
# easy vendoring.
delete_folder = __import__(
pool_module_name, fromlist=["delete_folder"]
).delete_folder
try:
delete_folder(pool_subfolder, allow_non_empty=True)
resource_tracker.unregister(pool_subfolder, "folder")
except OSError:
warnings.warn(
"Failed to delete temporary folder: {}".format(pool_subfolder)
)
self._finalizers[context_id] = atexit.register(_cleanup)
def _clean_temporary_resources(
self, context_id=None, force=False, allow_non_empty=False
):
"""Clean temporary resources created by a process-based pool"""
if context_id is None:
# Iterates over a copy of the cache keys to avoid Error due to
# iterating over a changing size dictionary.
for context_id in list(self._cached_temp_folders):
self._clean_temporary_resources(
context_id, force=force, allow_non_empty=allow_non_empty
)
else:
temp_folder = self._cached_temp_folders.get(context_id)
if temp_folder and os.path.exists(temp_folder):
for filename in os.listdir(temp_folder):
if force:
# Some workers have failed and the ref counted might
# be off. The workers should have shut down by this
# time so forcefully clean up the files.
resource_tracker.unregister(
os.path.join(temp_folder, filename), "file"
)
else:
resource_tracker.maybe_unlink(
os.path.join(temp_folder, filename), "file"
)
# When forcing clean-up, try to delete the folder even if some
# files are still in it. Otherwise, try to delete the folder
allow_non_empty |= force
# Clean up the folder if possible, either if it is empty or
# if none of the files in it are in used and allow_non_empty.
try:
delete_folder(temp_folder, allow_non_empty=allow_non_empty)
# Forget the folder once it has been deleted
self._cached_temp_folders.pop(context_id, None)
resource_tracker.unregister(temp_folder, "folder")
# Also cancel the finalizers that gets triggered at gc.
finalizer = self._finalizers.pop(context_id, None)
if finalizer is not None:
atexit.unregister(finalizer)
except OSError:
# Temporary folder cannot be deleted right now.
# This folder will be cleaned up by an atexit
# finalizer registered by the memmapping_reducer.
pass
| TemporaryResourcesManager |
python | huggingface__transformers | src/transformers/models/owlv2/modeling_owlv2.py | {
"start": 35880,
"end": 38195
} | class ____(Owlv2PreTrainedModel):
config: Owlv2TextConfig
input_modalities = ("text",)
def __init__(self, config: Owlv2TextConfig):
super().__init__(config)
self.text_model = Owlv2TextTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.embeddings.token_embedding
def set_input_embeddings(self, value):
self.text_model.embeddings.token_embedding = value
@auto_docstring
def forward(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
Examples:
```python
>>> from transformers import AutoProcessor, Owlv2TextModel
>>> model = Owlv2TextModel.from_pretrained("google/owlv2-base-patch16")
>>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16")
>>> inputs = processor(
... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt"
... )
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
# Get embeddings for all text queries in all batch samples
return self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTVisionTransformer with OWLVIT->OWLV2,OwlViT->Owlv2
| Owlv2TextModel |
python | huggingface__transformers | tests/models/siglip2/test_modeling_siglip2.py | {
"start": 16885,
"end": 18460
} | class ____(Siglip2ModelTesterMixin, unittest.TestCase):
all_model_classes = (Siglip2TextModel,) if is_torch_available() else ()
test_resize_embeddings = False
model_split_percents = [0.5, 0.8, 0.9]
def setUp(self):
self.model_tester = Siglip2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Siglip2TextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Siglip2TextModel does not support standalone training")
def test_training(self):
pass
@unittest.skip(reason="Siglip2TextModel does not support standalone training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="Siglip2TextModel does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="Siglip2TextModel does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Siglip2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "google/siglip2-base-patch16-naflex"
model = Siglip2TextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| Siglip2TextModelTest |
python | huggingface__transformers | src/transformers/models/beit/image_processing_beit.py | {
"start": 1581,
"end": 2077
} | class ____(ImagesKwargs, total=False):
r"""
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
"""
do_reduce_labels: bool
@requires(backends=("vision",))
| BeitImageProcessorKwargs |
python | openai__openai-python | src/openai/resources/responses/responses.py | {
"start": 157380,
"end": 158207
} | class ____:
def __init__(self, responses: Responses) -> None:
self._responses = responses
self.create = to_streamed_response_wrapper(
responses.create,
)
self.retrieve = to_streamed_response_wrapper(
responses.retrieve,
)
self.delete = to_streamed_response_wrapper(
responses.delete,
)
self.cancel = to_streamed_response_wrapper(
responses.cancel,
)
@cached_property
def input_items(self) -> InputItemsWithStreamingResponse:
return InputItemsWithStreamingResponse(self._responses.input_items)
@cached_property
def input_tokens(self) -> InputTokensWithStreamingResponse:
return InputTokensWithStreamingResponse(self._responses.input_tokens)
| ResponsesWithStreamingResponse |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_transactions.py | {
"start": 3533,
"end": 9331
} | class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_transactions_response().with_record(_a_transaction()).with_record(_a_transaction()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_many_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_transactions_response().with_pagination().with_record(_a_transaction().with_id("last_record_id_from_first_page")).build(),
)
http_mocker.get(
_transactions_request()
.with_starting_after("last_record_id_from_first_page")
.with_created_gte(_A_START_DATE)
.with_created_lte(_NOW)
.with_limit(100)
.build(),
_transactions_response().with_record(_a_transaction()).with_record(_a_transaction()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_no_state_when_read_then_return_ignore_lookback(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_transactions_response().with_record(_a_transaction()).build(),
)
self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
# request matched http_mocker
@HttpMocker()
def test_when_read_then_add_cursor_field(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_transactions_response().with_record(_a_transaction()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
assert output.records[0].record.data["updated"] == output.records[0].record.data["created"]
@HttpMocker()
def test_given_slice_range_when_read_then_perform_multiple_requests(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=30)
slice_range = timedelta(days=20)
slice_datetime = start_date + slice_range
http_mocker.get(
_transactions_request()
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_transactions_response().build(),
)
http_mocker.get(
_transactions_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).build(),
_transactions_response().build(),
)
self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
# request matched http_mocker
@HttpMocker()
def test_given_http_status_400_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_any_query_params().build(),
a_response_with_status(400),
)
output = self._read(_config())
assert_stream_did_not_run(output, _STREAM_NAME)
@HttpMocker()
def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_any_query_params().build(),
[
a_response_with_status(429),
_transactions_response().with_record(_a_transaction()).build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_any_query_params().build(),
[a_response_with_status(500), _transactions_response().with_record(_a_transaction()).build()],
)
output = self._read(_config())
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_transactions_request().with_any_query_params().build(),
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=0):
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
| FullRefreshTest |
python | python-pillow__Pillow | src/PIL/Image.py | {
"start": 11249,
"end": 13163
} | class ____:
"""
Used with :py:meth:`~PIL.Image.Image.point` for single band images with more than
8 bits, this represents an affine transformation, where the value is multiplied by
``scale`` and ``offset`` is added.
"""
def __init__(self, scale: float, offset: float) -> None:
self.scale = scale
self.offset = offset
def __neg__(self) -> ImagePointTransform:
return ImagePointTransform(-self.scale, -self.offset)
def __add__(self, other: ImagePointTransform | float) -> ImagePointTransform:
if isinstance(other, ImagePointTransform):
return ImagePointTransform(
self.scale + other.scale, self.offset + other.offset
)
return ImagePointTransform(self.scale, self.offset + other)
__radd__ = __add__
def __sub__(self, other: ImagePointTransform | float) -> ImagePointTransform:
return self + -other
def __rsub__(self, other: ImagePointTransform | float) -> ImagePointTransform:
return other + -self
def __mul__(self, other: ImagePointTransform | float) -> ImagePointTransform:
if isinstance(other, ImagePointTransform):
return NotImplemented
return ImagePointTransform(self.scale * other, self.offset * other)
__rmul__ = __mul__
def __truediv__(self, other: ImagePointTransform | float) -> ImagePointTransform:
if isinstance(other, ImagePointTransform):
return NotImplemented
return ImagePointTransform(self.scale / other, self.offset / other)
def _getscaleoffset(
expr: Callable[[ImagePointTransform], ImagePointTransform | float],
) -> tuple[float, float]:
a = expr(ImagePointTransform(1, 0))
return (a.scale, a.offset) if isinstance(a, ImagePointTransform) else (0, a)
# --------------------------------------------------------------------
# Implementation wrapper
| ImagePointTransform |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 110946,
"end": 112826
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Qwen3OmniMoeTalkerCodePredictorAttention(config=config, layer_idx=layer_idx)
self.mlp = Qwen3OmniMoeMLP(config)
self.input_layernorm = Qwen3OmniMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3OmniMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.attention_type = config.layer_types[layer_idx]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| Qwen3OmniMoeTalkerCodePredictorDecoderLayer |
python | doocs__leetcode | solution/3100-3199/3197.Find the Minimum Area to Cover All Ones II/Solution.py | {
"start": 0,
"end": 1948
} | class ____:
def minimumSum(self, grid: List[List[int]]) -> int:
def f(i1: int, j1: int, i2: int, j2: int) -> int:
x1 = y1 = inf
x2 = y2 = -inf
for i in range(i1, i2 + 1):
for j in range(j1, j2 + 1):
if grid[i][j] == 1:
x1 = min(x1, i)
y1 = min(y1, j)
x2 = max(x2, i)
y2 = max(y2, j)
return (x2 - x1 + 1) * (y2 - y1 + 1)
m, n = len(grid), len(grid[0])
ans = m * n
for i1 in range(m - 1):
for i2 in range(i1 + 1, m - 1):
ans = min(
ans,
f(0, 0, i1, n - 1)
+ f(i1 + 1, 0, i2, n - 1)
+ f(i2 + 1, 0, m - 1, n - 1),
)
for j1 in range(n - 1):
for j2 in range(j1 + 1, n - 1):
ans = min(
ans,
f(0, 0, m - 1, j1)
+ f(0, j1 + 1, m - 1, j2)
+ f(0, j2 + 1, m - 1, n - 1),
)
for i in range(m - 1):
for j in range(n - 1):
ans = min(
ans,
f(0, 0, i, j) + f(0, j + 1, i, n - 1) + f(i + 1, 0, m - 1, n - 1),
)
ans = min(
ans,
f(0, 0, i, n - 1)
+ f(i + 1, 0, m - 1, j)
+ f(i + 1, j + 1, m - 1, n - 1),
)
ans = min(
ans,
f(0, 0, i, j) + f(i + 1, 0, m - 1, j) + f(0, j + 1, m - 1, n - 1),
)
ans = min(
ans,
f(0, 0, m - 1, j)
+ f(0, j + 1, i, n - 1)
+ f(i + 1, j + 1, m - 1, n - 1),
)
return ans
| Solution |
python | coleifer__peewee | tests/model_sql.py | {
"start": 345,
"end": 515
} | class ____(TestModel):
category = CharField()
key = CharField()
value = IntegerField()
class Meta:
primary_key = CompositeKey('category', 'key')
| CKM |
python | django__django | tests/auth_tests/models/proxy.py | {
"start": 256,
"end": 402
} | class ____(User):
class Meta:
proxy = True
permissions = (("use_different_app_label", "May use a different app label"),)
| UserProxy |
python | google__python-fire | fire/main_test.py | {
"start": 1398,
"end": 3318
} | class ____(testutils.BaseTestCase):
"""Tests to verify correct import behavior for file executables."""
def setUp(self):
super().setUp()
self.file = tempfile.NamedTemporaryFile(suffix='.py') # pylint: disable=consider-using-with
self.file.write(b'class Foo:\n def double(self, n):\n return 2 * n\n')
self.file.flush()
self.file2 = tempfile.NamedTemporaryFile() # pylint: disable=consider-using-with
def testFileNameFire(self):
# Confirm that the file is correctly imported and doubles the number.
with self.assertOutputMatches('4'):
__main__.main(
['__main__.py', self.file.name, 'Foo', 'double', '--n', '2'])
def testFileNameFailure(self):
# Confirm that an existing file without a .py suffix raises a ValueError.
with self.assertRaises(ValueError):
__main__.main(
['__main__.py', self.file2.name, 'Foo', 'double', '--n', '2'])
def testFileNameModuleDuplication(self):
# Confirm that a file that masks a module still loads the module.
with self.assertOutputMatches('gettempdir'):
dirname = os.path.dirname(self.file.name)
with testutils.ChangeDirectory(dirname):
with open('tempfile', 'w'):
__main__.main([
'__main__.py',
'tempfile',
])
os.remove('tempfile')
def testFileNameModuleFileFailure(self):
# Confirm that an invalid file that masks a non-existent module fails.
with self.assertRaisesRegex(ValueError,
r'Fire can only be called on \.py files\.'): # pylint: disable=line-too-long,
dirname = os.path.dirname(self.file.name)
with testutils.ChangeDirectory(dirname):
with open('foobar', 'w'):
__main__.main([
'__main__.py',
'foobar',
])
os.remove('foobar')
if __name__ == '__main__':
testutils.main()
| MainModuleFileTest |
python | walkccc__LeetCode | solutions/1946. Largest Number After Mutating Substring/1946.py | {
"start": 0,
"end": 374
} | class ____:
def maximumNumber(self, num: str, change: list[int]) -> str:
numList = list(num)
mutated = False
for i, c in enumerate(numList):
d = int(c)
numlist[i] = chr(ord('0') + max(d, change[d]))
if mutated and d > change[d]:
return ''.join(numList)
if d < change[d]:
mutated = True
return ''.join(numList)
| Solution |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 222252,
"end": 222332
} | class ____(TestBSRNonCanonical, TestBSRMatrix):
pass
| TestBSRNonCanonicalMatrix |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 12367,
"end": 15443
} | class ____(MixpanelHttpRequester):
cursor_field = "time"
default_project_timezone = "US/Pacific"
    def __post_init__(self, parameters: Mapping[str, Any]) -> None:
        """Derive the export window bounds from the connector config.

        Computes three lookback windows (seconds) and the inclusive end date
        used later by `get_request_params`.
        """
        super().__post_init__(parameters)
        # from_date is widened by the larger of the export lookback (seconds)
        # and the attribution window (days, converted to seconds here).
        self._from_date_lookback_window = max(
            self.config.get("export_lookback_window", 0), self.config.get("attribution_window", 0) * 24 * 60 * 60
        )
        # to_date always lags "today" by one day when no end_date is configured.
        self._to_date_lookback_window = 1
        self._time_lookback_window = self.config.get("export_lookback_window", 0)
        if self.config.get("end_date"):
            # Fail fast on an unparseable end_date, then parse again for the value.
            self._validate_end_date()
            self._end_date = pendulum.parse(self.config.get("end_date")).date()
        else:
            # Default: yesterday in the project's timezone (US/Pacific fallback).
            self._end_date = (
                pendulum.today(tz=self.config.get("project_timezone", self.default_project_timezone))
                - timedelta(days=self._to_date_lookback_window)
            ).date()
    def _validate_end_date(self) -> None:
        """Raise a config error if the configured `end_date` cannot be parsed.

        NOTE(review): despite the `-> None` annotation this returns the parsed
        date on success; the only visible caller (`__post_init__`) ignores the
        return value and re-parses — confirm before relying on the return.
        """
        date_str = self.config.get("end_date")
        try:
            return pendulum.parse(date_str).date()
        except pendulum.parsing.exceptions.ParserError as e:
            raise_config_error(f"time data '{date_str}' does not match format '%Y-%m-%dT%H:%M:%SZ'", e)
    def get_request_params(
        self,
        *,
        stream_state: Optional[StreamState] = None,
        stream_slice: Optional[StreamSlice] = None,
        next_page_token: Optional[Mapping[str, Any]] = None,
    ) -> MutableMapping[str, Any]:
        """Extend the base request params with the export window.

        `from_date`/`to_date` bound the request by calendar date, while the
        `where` clause additionally filters on the event `$time` property.
        NOTE(review): assumes `stream_slice` is always provided with a
        `start_time` cursor value — confirm with callers.
        """
        request_params = super().get_request_params(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
        start_time = stream_slice.cursor_slice.get("start_time")
        # Widen the window backwards by the configured lookbacks so that
        # late-arriving events are re-exported.
        from_date_value = (pendulum.parse(start_time) - timedelta(seconds=self._from_date_lookback_window)).date()
        to_date_value = self._end_date
        # Unix timestamp used in the API-side `where` filter below.
        time_value = int((pendulum.parse(start_time) - timedelta(seconds=self._time_lookback_window)).timestamp())
        request_params["from_date"] = from_date_value.format("YYYY-MM-DD")
        request_params["to_date"] = to_date_value.format("YYYY-MM-DD")
        request_params["where"] = f'properties["$time"]>=datetime({time_value})'
        return request_params
def iter_dicts(lines, logger=logging.getLogger("airbyte")):
    """Yield one dict per JSON-lines record.

    Occasionally a single record arrives split across several consecutive
    lines; adjacent unparseable lines are buffered and re-joined until they
    parse. A successfully parsed standalone line discards any pending buffer.
    The sentinel line "terminated early" aborts iteration with a warning.
    """
    pending = []
    for raw in lines:
        if raw == "terminated early":
            logger.warning(f"Couldn't fetch data from Export API. Response: {raw}")
            return
        try:
            record = json.loads(raw)
        except ValueError:
            pending.append(raw)
        else:
            pending = []
            yield record
        if len(pending) > 1:
            try:
                combined = json.loads("".join(pending))
            except ValueError:
                continue
            pending = []
            yield combined
| ExportHttpRequester |
python | django__django | tests/fixtures_regress/models.py | {
"start": 1533,
"end": 1605
} | class ____(Article):
class Meta:
abstract = True
| CommonFeature |
python | automl__auto-sklearn | test/test_pipeline/components/classification/test_extra_trees.py | {
"start": 184,
"end": 1042
} | class ____(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.96
res["iris_n_calls"] = 9
res["default_iris_iterative"] = res["default_iris"]
res["default_iris_proba"] = 0.10053485167017469
res["default_iris_sparse"] = 0.74
res["default_digits"] = 0.9216757741347905
res["digits_n_calls"] = 9
res["default_digits_iterative"] = res["default_digits"]
res["default_digits_iterative_places"] = 3
res["default_digits_binary"] = 0.994535519125683
res["default_digits_multilabel"] = 0.9983621593291405
res["default_digits_multilabel_proba"] = 0.997710730679746
sk_mod = sklearn.ensemble.ExtraTreesClassifier
module = ExtraTreesClassifier
step_hyperparameter = {
"name": "n_estimators",
"value": module.get_max_iter(),
}
| ExtraTreesComponentTest |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/community.py | {
"start": 97,
"end": 314
} | class ____(Panel):
"""
A panel that provides links to the Django Debug Toolbar community.
"""
is_async = True
template = "debug_toolbar/panels/community.html"
title = _("Community")
| CommunityPanel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/associationproxy.py | {
"start": 10421,
"end": 10629
} | class ____(_SetterProtocol, Protocol[_T_con]):
def __call__(self, instance: Any, key: Any, value: _T_con) -> None: ...
# mypy 0.990 we are no longer allowed to make this Protocol[_T_con]
| _DictSetterProtocol |
python | apache__airflow | providers/dbt/cloud/src/airflow/providers/dbt/cloud/operators/dbt.py | {
"start": 1550,
"end": 1842
} | class ____(BaseOperatorLink):
"""Allows users to monitor the triggered job run directly in dbt Cloud."""
name = "Monitor Job Run"
    def get_link(self, operator: BaseOperator, *, ti_key=None):
        # The dbt Cloud job-run URL is pushed to XCom under "job_run_url"
        # by the operator at runtime; surface it as the extra link target.
        return XCom.get_value(key="job_run_url", ti_key=ti_key)
| DbtCloudRunJobOperatorLink |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 42758,
"end": 56227
} | class ____(Request):
"""
Get all the company's projects and all public projects
:param id: List of IDs to filter by
:type id: Sequence[str]
:param name: Get only projects whose name matches this pattern (python regular
expression syntax)
:type name: str
:param description: Get only projects whose description matches this pattern
(python regular expression syntax)
:type description: str
:param tags: User-defined tags list used to filter results. Prepend '-' to tag
name to indicate exclusion
:type tags: Sequence[str]
:param system_tags: System tags list used to filter results. Prepend '-' to
system tag name to indicate exclusion
:type system_tags: Sequence[str]
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page
:type order_by: Sequence[str]
:param page: Page number, returns a specific page out of the resulting list of
dataviews
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param search_text: Free text search query
:type search_text: str
:param only_fields: List of document's field names (nesting is supported using
'.', e.g. execution.model_labels). If provided, this list defines the query's
projection (only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
:param _all_: Multi-field pattern condition (all fields match pattern)
:type _all_: MultiFieldPatternData
:param _any_: Multi-field pattern condition (any field matches pattern)
:type _any_: MultiFieldPatternData
:param shallow_search: If set to 'true' then the search with the specified
criteria is performed among top level projects only (or if parents specified,
among the direct children of the these parents). Otherwise the search is
performed among all the company projects (or among all of the descendants of
the specified parents).
:type shallow_search: bool
"""
_service = "projects"
_action = "get_all"
_version = "2.13"
_schema = {
"definitions": {
"multi_field_pattern_data": {
"properties": {
"fields": {
"description": "List of field names",
"items": {"type": "string"},
"type": ["array", "null"],
},
"pattern": {
"description": "Pattern string (regex)",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"_all_": {
"description": "Multi-field pattern condition (all fields match pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"_any_": {
"description": "Multi-field pattern condition (any field matches pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"description": {
"description": "Get only projects whose description matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"id": {
"description": "List of IDs to filter by",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Get only projects whose name matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"only_fields": {
"description": "List of document's field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)",
"items": {"type": "string"},
"type": ["array", "null"],
},
"order_by": {
"description": "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page",
"items": {"type": "string"},
"type": ["array", "null"],
},
"page": {
"description": "Page number, returns a specific page out of the resulting list of dataviews",
"minimum": 0,
"type": ["integer", "null"],
},
"page_size": {
"description": "Page size, specifies the number of results returned in each page (last page may contain fewer results)",
"minimum": 1,
"type": ["integer", "null"],
},
"search_text": {
"description": "Free text search query",
"type": ["string", "null"],
},
"shallow_search": {
"default": False,
"description": "If set to 'true' then the search with the specified criteria is performed among top level projects only (or if parents specified, among the direct children of the these parents). Otherwise the search is performed among all the company projects (or among all of the descendants of the specified parents).",
"type": ["boolean", "null"],
},
"system_tags": {
"description": "System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
    def __init__(
        self,
        id: Optional[List[str]] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        tags: Optional[List[str]] = None,
        system_tags: Optional[List[str]] = None,
        order_by: Optional[List[str]] = None,
        page: Optional[int] = None,
        page_size: Optional[int] = None,
        search_text: Optional[str] = None,
        only_fields: Optional[List[str]] = None,
        _all_: Any = None,
        _any_: Any = None,
        shallow_search: Optional[bool] = False,
        **kwargs: Any
    ) -> None:
        """Build a projects.get_all request.

        Parameter semantics are documented in the class docstring and in
        `_schema`; each assignment below goes through the corresponding
        `schema_property` setter, which validates types.
        """
        super(GetAllRequest, self).__init__(**kwargs)
        self.id = id
        self.name = name
        self.description = description
        self.tags = tags
        self.system_tags = system_tags
        self.order_by = order_by
        self.page = page
        self.page_size = page_size
        self.search_text = search_text
        self.only_fields = only_fields
        self._all_ = _all_
        self._any_ = _any_
        self.shallow_search = shallow_search
@schema_property("id")
def id(self) -> Optional[List[str]]:
return self._property_id
@id.setter
def id(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("description")
def description(self) -> Optional[str]:
return self._property_description
@description.setter
def description(self, value: Optional[str]) -> None:
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("order_by")
def order_by(self) -> Optional[List[str]]:
return self._property_order_by
@order_by.setter
def order_by(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("page")
def page(self) -> Optional[int]:
return self._property_page
@page.setter
def page(self, value: Optional[int]) -> None:
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self) -> Optional[int]:
return self._property_page_size
@page_size.setter
def page_size(self, value: Optional[int]) -> None:
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("search_text")
def search_text(self) -> Optional[str]:
return self._property_search_text
@search_text.setter
def search_text(self, value: Optional[str]) -> None:
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("only_fields")
def only_fields(self) -> Optional[List[str]]:
return self._property_only_fields
@only_fields.setter
def only_fields(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
@schema_property("_all_")
def _all_(self) -> Any:
return self._property__all_
@_all_.setter
def _all_(self, value: Any) -> None:
if value is None:
self._property__all_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_all_", MultiFieldPatternData)
self._property__all_ = value
@schema_property("_any_")
def _any_(self) -> Any:
return self._property__any_
@_any_.setter
def _any_(self, value: Any) -> None:
if value is None:
self._property__any_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_any_", MultiFieldPatternData)
self._property__any_ = value
@schema_property("shallow_search")
def shallow_search(self) -> Optional[bool]:
return self._property_shallow_search
@shallow_search.setter
def shallow_search(self, value: Optional[bool]) -> None:
if value is None:
self._property_shallow_search = None
return
self.assert_isinstance(value, "shallow_search", (bool,))
self._property_shallow_search = value
| GetAllRequest |
python | getsentry__sentry | src/sentry/api/endpoints/debug_files.py | {
"start": 3928,
"end": 7110
} | class ____(ProjectEndpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectReleasePermission,)
    def post(self, request: Request, project: Project) -> Response:
        """Associate an existing proguard mapping file with a release.

        Requires `release_name` and a well-formed `proguard_uuid` in the
        request body; the uuid must match a debug file already uploaded to
        this project. Returns 201 on success, 400 for missing/invalid input,
        409 if the association already exists.
        """
        release_name = request.data.get("release_name")
        proguard_uuid = request.data.get("proguard_uuid")
        missing_fields = []
        if not release_name:
            missing_fields.append("release_name")
        if not proguard_uuid:
            missing_fields.append("proguard_uuid")
        if missing_fields:
            error_message = f"Missing required fields: {', '.join(missing_fields)}"
            return Response(data={"error": error_message}, status=status.HTTP_400_BAD_REQUEST)
        assert release_name is not None and proguard_uuid is not None
        try:
            # Validate uuid format only; the canonical string is used below.
            uuid.UUID(proguard_uuid)
        except ValueError:
            return Response(
                data={"error": "Invalid proguard_uuid"}, status=status.HTTP_400_BAD_REQUEST
            )
        proguard_uuid = str(proguard_uuid)
        difs = ProjectDebugFile.objects.find_by_debug_ids(project, [proguard_uuid])
        if not difs:
            return Response(
                data={"error": "No matching proguard mapping file with this uuid found"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        try:
            ProguardArtifactRelease.objects.create(
                organization_id=project.organization_id,
                project_id=project.id,
                release_name=release_name,
                project_debug_file=difs[proguard_uuid],
                proguard_uuid=proguard_uuid,
            )
            return Response(status=status.HTTP_201_CREATED)
        except IntegrityError:
            # Uniqueness constraint on (project, release_name) association.
            return Response(
                data={
                    "error": "Proguard artifact release with this name in this project already exists."
                },
                status=status.HTTP_409_CONFLICT,
            )
def get(self, request: Request, project: Project) -> Response:
"""
List a Project's Proguard Associated Releases
````````````````````````````````````````
Retrieve a list of associated releases for a given Proguard File.
:pparam string organization_id_or_slug: the id or slug of the organization the
file belongs to.
:pparam string project_id_or_slug: the id or slug of the project to list the
DIFs of.
:qparam string proguard_uuid: the uuid of the Proguard file.
:auth: required
"""
proguard_uuid = request.GET.get("proguard_uuid")
releases = None
if proguard_uuid:
releases = ProguardArtifactRelease.objects.filter(
organization_id=project.organization_id,
project_id=project.id,
proguard_uuid=proguard_uuid,
).values_list("release_name", flat=True)
return Response({"releases": releases})
@region_silo_endpoint
| ProguardArtifactReleasesEndpoint |
python | huggingface__transformers | src/transformers/models/voxtral/modular_voxtral.py | {
"start": 1397,
"end": 1459
} | class ____(Qwen2AudioEncoderLayer):
pass
| VoxtralEncoderLayer |
python | doocs__leetcode | solution/0000-0099/0041.First Missing Positive/Solution.py | {
"start": 0,
"end": 389
} | class ____:
def firstMissingPositive(self, nums: List[int]) -> int:
n = len(nums)
for i in range(n):
while 1 <= nums[i] <= n and nums[i] != nums[nums[i] - 1]:
j = nums[i] - 1
nums[i], nums[j] = nums[j], nums[i]
for i in range(n):
if nums[i] != i + 1:
return i + 1
return n + 1
| Solution |
python | doocs__leetcode | solution/1800-1899/1897.Redistribute Characters to Make All Strings Equal/Solution.py | {
"start": 0,
"end": 243
} | class ____:
def makeEqual(self, words: List[str]) -> bool:
cnt = Counter()
for w in words:
for c in w:
cnt[c] += 1
n = len(words)
return all(v % n == 0 for v in cnt.values())
| Solution |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 3115,
"end": 3249
} | class ____(AutoEnum):
"""Enumeration of worker statuses."""
ONLINE = AutoEnum.auto()
OFFLINE = AutoEnum.auto()
| WorkerStatus |
python | astropy__astropy | astropy/coordinates/builtin_frames/lsr.py | {
"start": 9360,
"end": 11267
} | class ____(BaseRADecFrame):
r"""A frame in the Dynamical Local Standard of Rest (LSR).
Conceptually the dynamical LSR is a frame moving at the circular
velocity at the Sun's location. In practice, the concept of a
circular velocity in a non-axisymmetric galaxy is not trivial.
This LSRD frame uses the historical definition from
Delhaye 1965, Solar Motion and Velocity Distribution of
Common Stars - Section 2.1.
meaning the solar motion is
:math:`(U, V, W) = (9, 12, 7)~{{\rm km}}~{{\rm s}}^{{-1}}`,
or 16.5 km/s towards l=53 b=25. The frame is axis-aligned and
co-spatial with `~astropy.coordinates.ICRS`.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# V_BARY_DELHAYE1965 = r.CartesianDifferential([9, 12, 7] * u.km/u.s)
# V_OFFSET_LSRD = (Galactic(V_BARY_DELHAYE1965.to_cartesian()).transform_to(ICRS()).data
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRD = r.CartesianDifferential(
[-0.6382306360182073, -14.585424483191094, 7.8011572411006815] * u.km / u.s
)
ICRS_LSRD_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRD
)
LSRD_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRD
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRD)
def icrs_to_lsrd(icrs_coord, lsr_frame):
return None, ICRS_LSRD_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRD, ICRS)
def lsrd_to_icrs(lsr_coord, icrs_frame):
return None, LSRD_ICRS_OFFSET
# ------------------------------------------------------------------------------
# Create loopback transformations
frame_transform_graph._add_merged_transform(LSR, ICRS, LSR)
frame_transform_graph._add_merged_transform(GalacticLSR, Galactic, GalacticLSR)
| LSRD |
python | giampaolo__psutil | tests/test_windows.py | {
"start": 27543,
"end": 30645
} | class ____(PsutilTestCase):
"""Certain functions require calling ReadProcessMemory.
This trivially works when called on the current process.
Check that this works on other processes, especially when they
have a different bitness.
"""
@staticmethod
def find_other_interpreter():
# find a python interpreter that is of the opposite bitness from us
code = "import sys; sys.stdout.write(str(sys.maxsize > 2**32))"
# XXX: a different and probably more stable approach might be to access
# the registry but accessing 64 bit paths from a 32 bit process
for filename in glob.glob(r"C:\Python*\python.exe"):
proc = subprocess.Popen(
args=[filename, "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
output, _ = proc.communicate()
proc.wait()
if output == str(not IS_64BIT):
return filename
test_args = ["-c", "import sys; sys.stdin.read()"]
    def setUp(self):
        """Spawn one 32-bit and one 64-bit Python child to inspect remotely.

        Skips the test when no interpreter of the opposite bitness is
        installed. Both children block on `sys.stdin.read()` (see
        `test_args`) until tearDown communicates with them.
        """
        super().setUp()
        other_python = self.find_other_interpreter()
        if other_python is None:
            return pytest.skip(
                "could not find interpreter with opposite bitness"
            )
        if IS_64BIT:
            self.python64 = sys.executable
            self.python32 = other_python
        else:
            self.python64 = other_python
            self.python32 = sys.executable
        env = os.environ.copy()
        # Marker variable read back by the test_environ_* tests.
        env["THINK_OF_A_NUMBER"] = str(os.getpid())
        self.proc32 = self.spawn_subproc(
            [self.python32] + self.test_args, env=env, stdin=subprocess.PIPE
        )
        self.proc64 = self.spawn_subproc(
            [self.python64] + self.test_args, env=env, stdin=subprocess.PIPE
        )
def tearDown(self):
super().tearDown()
self.proc32.communicate()
self.proc64.communicate()
def test_cmdline_32(self):
p = psutil.Process(self.proc32.pid)
assert len(p.cmdline()) == 3
assert p.cmdline()[1:] == self.test_args
def test_cmdline_64(self):
p = psutil.Process(self.proc64.pid)
assert len(p.cmdline()) == 3
assert p.cmdline()[1:] == self.test_args
def test_cwd_32(self):
p = psutil.Process(self.proc32.pid)
assert p.cwd() == os.getcwd()
def test_cwd_64(self):
p = psutil.Process(self.proc64.pid)
assert p.cwd() == os.getcwd()
def test_environ_32(self):
p = psutil.Process(self.proc32.pid)
e = p.environ()
assert "THINK_OF_A_NUMBER" in e
assert e["THINK_OF_A_NUMBER"] == str(os.getpid())
def test_environ_64(self):
p = psutil.Process(self.proc64.pid)
try:
p.environ()
except psutil.AccessDenied:
pass
# ===================================================================
# Windows services
# ===================================================================
@pytest.mark.skipif(not WINDOWS, reason="WINDOWS only")
| RemoteProcessTestCase |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 123453,
"end": 125693
} | class ____(Response):
"""
Response of projects.get_task_parents endpoint.
:param parents: The list of unique task parents sorted by their names
:type parents: Sequence[dict]
"""
_service = "projects"
_action = "get_task_parents"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"parents": {
"description": "The list of unique task parents sorted by their names",
"items": {
"properties": {
"id": {
"description": "The ID of the parent task",
"type": "string",
},
"name": {
"description": "The name of the parent task",
"type": "string",
},
"project": {
"id": {
"description": "The ID of the parent task project",
"type": "string",
},
"name": {
"description": "The name of the parent task project",
"type": "string",
},
"type": "object",
},
},
"type": "object",
},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, parents: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetTaskParentsResponse, self).__init__(**kwargs)
self.parents = parents
@schema_property("parents")
def parents(self) -> Optional[List[dict]]:
return self._property_parents
@parents.setter
def parents(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_parents = None
return
self.assert_isinstance(value, "parents", (list, tuple))
self.assert_isinstance(value, "parents", (dict,), is_array=True)
self._property_parents = value
| GetTaskParentsResponse |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_organization_release_commits.py | {
"start": 266,
"end": 1715
} | class ____(APITestCase):
    def test_simple(self) -> None:
        """The endpoint lists a release's commits ordered by ReleaseCommit.order."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        repo = Repository.objects.create(organization_id=project.organization_id, name=project.name)
        commit = Commit.objects.create(
            organization_id=project.organization_id, repository_id=repo.id, key="a" * 40
        )
        commit2 = Commit.objects.create(
            organization_id=project.organization_id, repository_id=repo.id, key="b" * 40
        )
        # Deliberately attach the commits out of creation order: commit gets
        # order=1, commit2 gets order=0, so the response must sort by order.
        ReleaseCommit.objects.create(
            organization_id=project.organization_id, release=release, commit=commit, order=1
        )
        ReleaseCommit.objects.create(
            organization_id=project.organization_id, release=release, commit=commit2, order=0
        )
        url = reverse(
            "sentry-api-0-organization-release-commits",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        response = self.client.get(url)
        assert response.status_code == 200, response.content
        assert len(response.data) == 2
        # order=0 (commit2) first, order=1 (commit) second.
        assert response.data[0]["id"] == commit2.key
        assert response.data[1]["id"] == commit.key
| ReleaseCommitsListTest |
python | PrefectHQ__prefect | tests/runtime/test_task_run.py | {
"start": 4778,
"end": 5169
} | class ____:
async def test_name_is_attribute(self):
assert "name" in dir(task_run)
async def test_name_is_none_when_not_set(self):
assert task_run.name is None
async def test_name_from_context(self):
with TaskRunContext.model_construct(
task_run=TaskRun.model_construct(name="foo")
):
assert task_run.name == "foo"
| TestName |
python | squidfunk__mkdocs-material | material/plugins/blog/structure/options.py | {
"start": 4870,
"end": 5081
} | class ____(ListOfItems[T]):
# Ensure that each item is unique
    def run_validation(self, value: object):
        """Validate as a list of items, then drop duplicates while keeping
        first-seen order (dict.fromkeys preserves insertion order)."""
        data = super().run_validation(value)
        return list(dict.fromkeys(data))
| UniqueListOfItems |
python | pypa__setuptools | setuptools/_distutils/tests/test_build_ext.py | {
"start": 2442,
"end": 22341
} | class ____(TempdirManager):
def build_ext(self, *args, **kwargs):
return build_ext(*args, **kwargs)
@pytest.mark.parametrize("copy_so", [False])
def test_build_ext(self, copy_so):
missing_compiler_executable()
copy_xxmodule_c(self.tmp_dir)
xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
xx_ext = Extension('xx', [xx_c])
if sys.platform != "win32":
if not copy_so:
xx_ext = Extension(
'xx',
[xx_c],
library_dirs=['/usr/lib'],
libraries=['z'],
runtime_library_dirs=['/usr/lib'],
)
elif sys.platform == 'linux':
libz_so = {
os.path.realpath(name) for name in glob.iglob('/usr/lib*/libz.so*')
}
libz_so = sorted(libz_so, key=lambda lib_path: len(lib_path))
shutil.copyfile(libz_so[-1], '/tmp/libxx_z.so')
xx_ext = Extension(
'xx',
[xx_c],
library_dirs=['/tmp'],
libraries=['xx_z'],
runtime_library_dirs=['/tmp'],
)
dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]})
dist.package_dir = self.tmp_dir
cmd = self.build_ext(dist)
fixup_build_ext(cmd)
cmd.build_lib = self.tmp_dir
cmd.build_temp = self.tmp_dir
old_stdout = sys.stdout
if not support.verbose:
# silence compiler output
sys.stdout = StringIO()
try:
cmd.ensure_finalized()
cmd.run()
finally:
sys.stdout = old_stdout
with safe_extension_import('xx', self.tmp_dir):
self._test_xx(copy_so)
if sys.platform == 'linux' and copy_so:
os.unlink('/tmp/libxx_z.so')
@staticmethod
def _test_xx(copy_so):
import xx # type: ignore[import-not-found] # Module generated for tests
for attr in ('error', 'foo', 'new', 'roj'):
assert hasattr(xx, attr)
assert xx.foo(2, 5) == 7
assert xx.foo(13, 15) == 28
assert xx.new().demo() is None
if support.HAVE_DOCSTRINGS:
doc = 'This is a template module just for instruction.'
assert xx.__doc__ == doc
assert isinstance(xx.Null(), xx.Null)
assert isinstance(xx.Str(), xx.Str)
if sys.platform == 'linux':
so_headers = subprocess.check_output(
["readelf", "-d", xx.__file__], universal_newlines=True
)
import pprint
pprint.pprint(so_headers)
rpaths = [
rpath
for line in so_headers.split("\n")
if "RPATH" in line or "RUNPATH" in line
for rpath in line.split()[2][1:-1].split(":")
]
if not copy_so:
pprint.pprint(rpaths)
# Linked against a library in /usr/lib{,64}
assert "/usr/lib" not in rpaths and "/usr/lib64" not in rpaths
else:
# Linked against a library in /tmp
assert "/tmp" in rpaths
# The import is the real test here
def test_solaris_enable_shared(self):
dist = Distribution({'name': 'xx'})
cmd = self.build_ext(dist)
old = sys.platform
sys.platform = 'sunos' # fooling finalize_options
from distutils.sysconfig import _config_vars
old_var = _config_vars.get('Py_ENABLE_SHARED')
_config_vars['Py_ENABLE_SHARED'] = True
try:
cmd.ensure_finalized()
finally:
sys.platform = old
if old_var is None:
del _config_vars['Py_ENABLE_SHARED']
else:
_config_vars['Py_ENABLE_SHARED'] = old_var
# make sure we get some library dirs under solaris
assert len(cmd.library_dirs) > 0
def test_user_site(self):
import site
dist = Distribution({'name': 'xx'})
cmd = self.build_ext(dist)
# making sure the user option is there
options = [name for name, short, label in cmd.user_options]
assert 'user' in options
# setting a value
cmd.user = True
# setting user based lib and include
lib = os.path.join(site.USER_BASE, 'lib')
incl = os.path.join(site.USER_BASE, 'include')
os.mkdir(lib)
os.mkdir(incl)
# let's run finalize
cmd.ensure_finalized()
# see if include_dirs and library_dirs
# were set
assert lib in cmd.library_dirs
assert lib in cmd.rpath
assert incl in cmd.include_dirs
def test_optional_extension(self):
# this extension will fail, but let's ignore this failure
# with the optional argument.
modules = [Extension('foo', ['xxx'], optional=False)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = self.build_ext(dist)
cmd.ensure_finalized()
with pytest.raises((UnknownFileError, CompileError)):
cmd.run() # should raise an error
modules = [Extension('foo', ['xxx'], optional=True)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = self.build_ext(dist)
cmd.ensure_finalized()
cmd.run() # should pass
def test_finalize_options(self):
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
modules = [Extension('foo', ['xxx'], optional=False)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = self.build_ext(dist)
cmd.finalize_options()
py_include = sysconfig.get_python_inc()
for p in py_include.split(os.path.pathsep):
assert p in cmd.include_dirs
plat_py_include = sysconfig.get_python_inc(plat_specific=True)
for p in plat_py_include.split(os.path.pathsep):
assert p in cmd.include_dirs
# make sure cmd.libraries is turned into a list
# if it's a string
cmd = self.build_ext(dist)
cmd.libraries = 'my_lib, other_lib lastlib'
cmd.finalize_options()
assert cmd.libraries == ['my_lib', 'other_lib', 'lastlib']
# make sure cmd.library_dirs is turned into a list
# if it's a string
cmd = self.build_ext(dist)
cmd.library_dirs = f'my_lib_dir{os.pathsep}other_lib_dir'
cmd.finalize_options()
assert 'my_lib_dir' in cmd.library_dirs
assert 'other_lib_dir' in cmd.library_dirs
# make sure rpath is turned into a list
# if it's a string
cmd = self.build_ext(dist)
cmd.rpath = f'one{os.pathsep}two'
cmd.finalize_options()
assert cmd.rpath == ['one', 'two']
# make sure cmd.link_objects is turned into a list
# if it's a string
cmd = build_ext(dist)
cmd.link_objects = 'one two,three'
cmd.finalize_options()
assert cmd.link_objects == ['one', 'two', 'three']
# XXX more tests to perform for win32
# make sure define is turned into 2-tuples
# strings if they are ','-separated strings
cmd = self.build_ext(dist)
cmd.define = 'one,two'
cmd.finalize_options()
assert cmd.define == [('one', '1'), ('two', '1')]
# make sure undef is turned into a list of
# strings if they are ','-separated strings
cmd = self.build_ext(dist)
cmd.undef = 'one,two'
cmd.finalize_options()
assert cmd.undef == ['one', 'two']
# make sure swig_opts is turned into a list
cmd = self.build_ext(dist)
cmd.swig_opts = None
cmd.finalize_options()
assert cmd.swig_opts == []
cmd = self.build_ext(dist)
cmd.swig_opts = '1 2'
cmd.finalize_options()
assert cmd.swig_opts == ['1', '2']
def test_check_extensions_list(self):
dist = Distribution()
cmd = self.build_ext(dist)
cmd.finalize_options()
# 'extensions' option must be a list of Extension instances
with pytest.raises(DistutilsSetupError):
cmd.check_extensions_list('foo')
# each element of 'ext_modules' option must be an
# Extension instance or 2-tuple
exts = [('bar', 'foo', 'bar'), 'foo']
with pytest.raises(DistutilsSetupError):
cmd.check_extensions_list(exts)
# first element of each tuple in 'ext_modules'
# must be the extension name (a string) and match
# a python dotted-separated name
exts = [('foo-bar', '')]
with pytest.raises(DistutilsSetupError):
cmd.check_extensions_list(exts)
# second element of each tuple in 'ext_modules'
# must be a dictionary (build info)
exts = [('foo.bar', '')]
with pytest.raises(DistutilsSetupError):
cmd.check_extensions_list(exts)
# ok this one should pass
exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', 'some': 'bar'})]
cmd.check_extensions_list(exts)
ext = exts[0]
assert isinstance(ext, Extension)
# check_extensions_list adds in ext the values passed
# when they are in ('include_dirs', 'library_dirs', 'libraries'
# 'extra_objects', 'extra_compile_args', 'extra_link_args')
assert ext.libraries == 'foo'
assert not hasattr(ext, 'some')
# 'macros' element of build info dict must be 1- or 2-tuple
exts = [
(
'foo.bar',
{
'sources': [''],
'libraries': 'foo',
'some': 'bar',
'macros': [('1', '2', '3'), 'foo'],
},
)
]
with pytest.raises(DistutilsSetupError):
cmd.check_extensions_list(exts)
exts[0][1]['macros'] = [('1', '2'), ('3',)]
cmd.check_extensions_list(exts)
assert exts[0].undef_macros == ['3']
assert exts[0].define_macros == [('1', '2')]
def test_get_source_files(self):
modules = [Extension('foo', ['xxx'], optional=False)]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = self.build_ext(dist)
cmd.ensure_finalized()
assert cmd.get_source_files() == ['xxx']
def test_unicode_module_names(self):
modules = [
Extension('foo', ['aaa'], optional=False),
Extension('föö', ['uuu'], optional=False),
]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = self.build_ext(dist)
cmd.ensure_finalized()
assert re.search(r'foo(_d)?\..*', cmd.get_ext_filename(modules[0].name))
assert re.search(r'föö(_d)?\..*', cmd.get_ext_filename(modules[1].name))
assert cmd.get_export_symbols(modules[0]) == ['PyInit_foo']
assert cmd.get_export_symbols(modules[1]) == ['PyInitU_f_1gaa']
def test_export_symbols__init__(self):
# https://github.com/python/cpython/issues/80074
# https://github.com/pypa/setuptools/issues/4826
modules = [
Extension('foo.__init__', ['aaa']),
Extension('föö.__init__', ['uuu']),
]
dist = Distribution({'name': 'xx', 'ext_modules': modules})
cmd = self.build_ext(dist)
cmd.ensure_finalized()
assert cmd.get_export_symbols(modules[0]) == ['PyInit_foo']
assert cmd.get_export_symbols(modules[1]) == ['PyInitU_f_1gaa']
def test_compiler_option(self):
# cmd.compiler is an option and
# should not be overridden by a compiler instance
# when the command is run
dist = Distribution()
cmd = self.build_ext(dist)
cmd.compiler = 'unix'
cmd.ensure_finalized()
cmd.run()
assert cmd.compiler == 'unix'
def test_get_outputs(self):
missing_compiler_executable()
tmp_dir = self.mkdtemp()
c_file = os.path.join(tmp_dir, 'foo.c')
self.write_file(c_file, 'void PyInit_foo(void) {}\n')
ext = Extension('foo', [c_file], optional=False)
dist = Distribution({'name': 'xx', 'ext_modules': [ext]})
cmd = self.build_ext(dist)
fixup_build_ext(cmd)
cmd.ensure_finalized()
assert len(cmd.get_outputs()) == 1
cmd.build_lib = os.path.join(self.tmp_dir, 'build')
cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')
# issue #5977 : distutils build_ext.get_outputs
# returns wrong result with --inplace
other_tmp_dir = os.path.realpath(self.mkdtemp())
old_wd = os.getcwd()
os.chdir(other_tmp_dir)
try:
cmd.inplace = True
cmd.run()
so_file = cmd.get_outputs()[0]
finally:
os.chdir(old_wd)
assert os.path.exists(so_file)
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
assert so_file.endswith(ext_suffix)
so_dir = os.path.dirname(so_file)
assert so_dir == other_tmp_dir
cmd.inplace = False
cmd.compiler = None
cmd.run()
so_file = cmd.get_outputs()[0]
assert os.path.exists(so_file)
assert so_file.endswith(ext_suffix)
so_dir = os.path.dirname(so_file)
assert so_dir == cmd.build_lib
# inplace = False, cmd.package = 'bar'
build_py = cmd.get_finalized_command('build_py')
build_py.package_dir = {'': 'bar'}
path = cmd.get_ext_fullpath('foo')
# checking that the last directory is the build_dir
path = os.path.split(path)[0]
assert path == cmd.build_lib
# inplace = True, cmd.package = 'bar'
cmd.inplace = True
other_tmp_dir = os.path.realpath(self.mkdtemp())
old_wd = os.getcwd()
os.chdir(other_tmp_dir)
try:
path = cmd.get_ext_fullpath('foo')
finally:
os.chdir(old_wd)
# checking that the last directory is bar
path = os.path.split(path)[0]
lastdir = os.path.split(path)[-1]
assert lastdir == 'bar'
def test_ext_fullpath(self):
ext = sysconfig.get_config_var('EXT_SUFFIX')
# building lxml.etree inplace
# etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
# etree_ext = Extension('lxml.etree', [etree_c])
# dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
dist = Distribution()
cmd = self.build_ext(dist)
cmd.inplace = True
cmd.distribution.package_dir = {'': 'src'}
cmd.distribution.packages = ['lxml', 'lxml.html']
curdir = os.getcwd()
wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
assert wanted == path
# building lxml.etree not inplace
cmd.inplace = False
cmd.build_lib = os.path.join(curdir, 'tmpdir')
wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
assert wanted == path
# building twisted.runner.portmap not inplace
build_py = cmd.get_finalized_command('build_py')
build_py.package_dir = {}
cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
path = cmd.get_ext_fullpath('twisted.runner.portmap')
wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner', 'portmap' + ext)
assert wanted == path
# building twisted.runner.portmap inplace
cmd.inplace = True
path = cmd.get_ext_fullpath('twisted.runner.portmap')
wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
assert wanted == path
@pytest.mark.skipif('platform.system() != "Darwin"')
@pytest.mark.usefixtures('save_env')
def test_deployment_target_default(self):
# Issue 9516: Test that, in the absence of the environment variable,
# an extension module is compiled with the same deployment target as
# the interpreter.
self._try_compile_deployment_target('==', None)
@pytest.mark.skipif('platform.system() != "Darwin"')
@pytest.mark.usefixtures('save_env')
def test_deployment_target_too_low(self):
# Issue 9516: Test that an extension module is not allowed to be
# compiled with a deployment target less than that of the interpreter.
with pytest.raises(DistutilsPlatformError):
self._try_compile_deployment_target('>', '10.1')
@pytest.mark.skipif('platform.system() != "Darwin"')
@pytest.mark.usefixtures('save_env')
def test_deployment_target_higher_ok(self): # pragma: no cover
# Issue 9516: Test that an extension module can be compiled with a
# deployment target higher than that of the interpreter: the ext
# module may depend on some newer OS feature.
deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if deptarget:
# increment the minor version number (i.e. 10.6 -> 10.7)
deptarget = [int(x) for x in deptarget.split('.')]
deptarget[-1] += 1
deptarget = '.'.join(str(i) for i in deptarget)
self._try_compile_deployment_target('<', deptarget)
def _try_compile_deployment_target(self, operator, target): # pragma: no cover
if target is None:
if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
del os.environ['MACOSX_DEPLOYMENT_TARGET']
else:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
jaraco.path.build(
{
'deptargetmodule.c': textwrap.dedent(f"""\
#include <AvailabilityMacros.h>
int dummy;
#if TARGET {operator} MAC_OS_X_VERSION_MIN_REQUIRED
#else
#error "Unexpected target"
#endif
"""),
},
self.tmp_path,
)
# get the deployment target that the interpreter was built with
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
target = tuple(map(int, target.split('.')[0:2]))
# format the target value as defined in the Apple
# Availability Macros. We can't use the macro names since
# at least one value we test with will not exist yet.
if target[:2] < (10, 10):
# for 10.1 through 10.9.x -> "10n0"
tmpl = '{:02}{:01}0'
else:
# for 10.10 and beyond -> "10nn00"
if len(target) >= 2:
tmpl = '{:02}{:02}00'
else:
# 11 and later can have no minor version (11 instead of 11.0)
tmpl = '{:02}0000'
target = tmpl.format(*target)
deptarget_ext = Extension(
'deptarget',
[self.tmp_path / 'deptargetmodule.c'],
extra_compile_args=[f'-DTARGET={target}'],
)
dist = Distribution({'name': 'deptarget', 'ext_modules': [deptarget_ext]})
dist.package_dir = self.tmp_dir
cmd = self.build_ext(dist)
cmd.build_lib = self.tmp_dir
cmd.build_temp = self.tmp_dir
try:
old_stdout = sys.stdout
if not support.verbose:
# silence compiler output
sys.stdout = StringIO()
try:
cmd.ensure_finalized()
cmd.run()
finally:
sys.stdout = old_stdout
except CompileError:
self.fail("Wrong deployment target during compilation")
| TestBuildExt |
python | plotly__plotly.py | plotly/graph_objs/scattercarpet/marker/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8579
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet.marker.colorbar"
_path_str = "scattercarpet.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattercarpet.
marker.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | numba__numba | numba/cuda/tests/cudapy/test_errors.py | {
"start": 162,
"end": 2620
} | class ____(CUDATestCase):
"""
Test compile-time errors with @jit.
"""
def test_too_many_dims(self):
kernfunc = cuda.jit(noop)
with self.assertRaises(ValueError) as raises:
kernfunc[(1, 2, 3, 4), (5, 6)]
self.assertIn("griddim must be a sequence of 1, 2 or 3 integers, "
"got [1, 2, 3, 4]",
str(raises.exception))
with self.assertRaises(ValueError) as raises:
kernfunc[(1, 2,), (3, 4, 5, 6)]
self.assertIn("blockdim must be a sequence of 1, 2 or 3 integers, "
"got [3, 4, 5, 6]",
str(raises.exception))
def test_non_integral_dims(self):
kernfunc = cuda.jit(noop)
with self.assertRaises(TypeError) as raises:
kernfunc[2.0, 3]
self.assertIn("griddim must be a sequence of integers, got [2.0]",
str(raises.exception))
with self.assertRaises(TypeError) as raises:
kernfunc[2, 3.0]
self.assertIn("blockdim must be a sequence of integers, got [3.0]",
str(raises.exception))
def _test_unconfigured(self, kernfunc):
with self.assertRaises(ValueError) as raises:
kernfunc(0)
self.assertIn("launch configuration was not specified",
str(raises.exception))
def test_unconfigured_typed_cudakernel(self):
kernfunc = cuda.jit("void(int32)")(noop)
self._test_unconfigured(kernfunc)
def test_unconfigured_untyped_cudakernel(self):
kernfunc = cuda.jit(noop)
self._test_unconfigured(kernfunc)
@skip_on_cudasim('TypingError does not occur on simulator')
def test_typing_error(self):
# see #5860, this is present to catch changes to error reporting
# accidentally breaking the CUDA target
@cuda.jit(device=True)
def dev_func(x):
# floor is deliberately not imported for the purpose of this test.
return floor(x) # noqa: F821
@cuda.jit
def kernel_func():
dev_func(1.5)
with self.assertRaises(TypingError) as raises:
kernel_func[1, 1]()
excstr = str(raises.exception)
self.assertIn("resolving callee type: type(CUDADispatcher", excstr)
self.assertIn("NameError: name 'floor' is not defined", excstr)
if __name__ == '__main__':
unittest.main()
| TestJitErrors |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-exceed-threshold-value-i.py | {
"start": 37,
"end": 234
} | class ____(object):
def minOperations(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
return sum(x < k for x in nums)
| Solution |
python | python-openxml__python-docx | tests/styles/test_style.py | {
"start": 2919,
"end": 14029
} | class ____:
def it_knows_its_style_id(self, id_get_fixture):
style, expected_value = id_get_fixture
assert style.style_id == expected_value
def it_can_change_its_style_id(self, id_set_fixture):
style, new_value, expected_xml = id_set_fixture
style.style_id = new_value
assert style._element.xml == expected_xml
def it_knows_its_type(self, type_get_fixture):
style, expected_value = type_get_fixture
assert style.type == expected_value
def it_knows_its_name(self, name_get_fixture):
style, expected_value = name_get_fixture
assert style.name == expected_value
def it_can_change_its_name(self, name_set_fixture):
style, new_value, expected_xml = name_set_fixture
style.name = new_value
assert style._element.xml == expected_xml
def it_knows_whether_its_a_builtin_style(self, builtin_get_fixture):
style, expected_value = builtin_get_fixture
assert style.builtin is expected_value
def it_knows_whether_its_hidden(self, hidden_get_fixture):
style, expected_value = hidden_get_fixture
assert style.hidden == expected_value
def it_can_change_whether_its_hidden(self, hidden_set_fixture):
style, value, expected_xml = hidden_set_fixture
style.hidden = value
assert style._element.xml == expected_xml
def it_knows_its_sort_order(self, priority_get_fixture):
style, expected_value = priority_get_fixture
assert style.priority == expected_value
def it_can_change_its_sort_order(self, priority_set_fixture):
style, value, expected_xml = priority_set_fixture
style.priority = value
assert style._element.xml == expected_xml
def it_knows_whether_its_unhide_when_used(self, unhide_get_fixture):
style, expected_value = unhide_get_fixture
assert style.unhide_when_used == expected_value
def it_can_change_its_unhide_when_used_value(self, unhide_set_fixture):
style, value, expected_xml = unhide_set_fixture
style.unhide_when_used = value
assert style._element.xml == expected_xml
def it_knows_its_quick_style_setting(self, quick_get_fixture):
style, expected_value = quick_get_fixture
assert style.quick_style == expected_value
def it_can_change_its_quick_style_setting(self, quick_set_fixture):
style, new_value, expected_xml = quick_set_fixture
style.quick_style = new_value
assert style._element.xml == expected_xml
def it_knows_whether_its_locked(self, locked_get_fixture):
style, expected_value = locked_get_fixture
assert style.locked == expected_value
def it_can_change_whether_its_locked(self, locked_set_fixture):
style, value, expected_xml = locked_set_fixture
style.locked = value
assert style._element.xml == expected_xml
def it_can_delete_itself_from_the_document(self, delete_fixture):
style, styles, expected_xml = delete_fixture
style.delete()
assert styles.xml == expected_xml
assert style._element is None
# fixture --------------------------------------------------------
@pytest.fixture(
params=[
("w:style", True),
("w:style{w:customStyle=0}", True),
("w:style{w:customStyle=1}", False),
]
)
def builtin_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture
def delete_fixture(self):
styles = element("w:styles/w:style")
style = BaseStyle(styles[0])
expected_xml = xml("w:styles")
return style, styles, expected_xml
@pytest.fixture(
params=[
("w:style", False),
("w:style/w:semiHidden", True),
("w:style/w:semiHidden{w:val=0}", False),
("w:style/w:semiHidden{w:val=1}", True),
]
)
def hidden_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", True, "w:style/w:semiHidden"),
("w:style/w:semiHidden{w:val=0}", True, "w:style/w:semiHidden"),
("w:style/w:semiHidden{w:val=1}", True, "w:style/w:semiHidden"),
("w:style", False, "w:style"),
("w:style/w:semiHidden", False, "w:style"),
("w:style/w:semiHidden{w:val=1}", False, "w:style"),
]
)
def hidden_set_fixture(self, request):
style_cxml, value, expected_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_cxml)
return style, value, expected_xml
@pytest.fixture(
params=[
("w:style", None),
("w:style{w:styleId=Foobar}", "Foobar"),
]
)
def id_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", "Foo", "w:style{w:styleId=Foo}"),
("w:style{w:styleId=Foo}", "Bar", "w:style{w:styleId=Bar}"),
("w:style{w:styleId=Bar}", None, "w:style"),
("w:style", None, "w:style"),
]
)
def id_set_fixture(self, request):
style_cxml, new_value, expected_style_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_style_cxml)
return style, new_value, expected_xml
@pytest.fixture(
params=[
("w:style", False),
("w:style/w:locked", True),
("w:style/w:locked{w:val=0}", False),
("w:style/w:locked{w:val=1}", True),
]
)
def locked_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", True, "w:style/w:locked"),
("w:style/w:locked{w:val=0}", True, "w:style/w:locked"),
("w:style/w:locked{w:val=1}", True, "w:style/w:locked"),
("w:style", False, "w:style"),
("w:style/w:locked", False, "w:style"),
("w:style/w:locked{w:val=1}", False, "w:style"),
]
)
def locked_set_fixture(self, request):
style_cxml, value, expected_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_cxml)
return style, value, expected_xml
@pytest.fixture(
params=[
("w:style{w:type=table}", None),
("w:style{w:type=table}/w:name{w:val=Boofar}", "Boofar"),
("w:style{w:type=table}/w:name{w:val=heading 1}", "Heading 1"),
]
)
def name_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", "Foo", "w:style/w:name{w:val=Foo}"),
("w:style/w:name{w:val=Foo}", "Bar", "w:style/w:name{w:val=Bar}"),
("w:style/w:name{w:val=Bar}", None, "w:style"),
]
)
def name_set_fixture(self, request):
style_cxml, new_value, expected_style_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_style_cxml)
return style, new_value, expected_xml
@pytest.fixture(
params=[
("w:style", None),
("w:style/w:uiPriority{w:val=42}", 42),
]
)
def priority_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", 42, "w:style/w:uiPriority{w:val=42}"),
("w:style/w:uiPriority{w:val=42}", 24, "w:style/w:uiPriority{w:val=24}"),
("w:style/w:uiPriority{w:val=24}", None, "w:style"),
]
)
def priority_set_fixture(self, request):
style_cxml, value, expected_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_cxml)
return style, value, expected_xml
@pytest.fixture(
params=[
("w:style", False),
("w:style/w:qFormat", True),
("w:style/w:qFormat{w:val=0}", False),
("w:style/w:qFormat{w:val=on}", True),
]
)
def quick_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", True, "w:style/w:qFormat"),
("w:style/w:qFormat", False, "w:style"),
("w:style/w:qFormat", True, "w:style/w:qFormat"),
("w:style/w:qFormat{w:val=0}", False, "w:style"),
("w:style/w:qFormat{w:val=on}", True, "w:style/w:qFormat"),
]
)
def quick_set_fixture(self, request):
style_cxml, new_value, expected_style_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_style_cxml)
return style, new_value, expected_xml
@pytest.fixture(
params=[
("w:style", WD_STYLE_TYPE.PARAGRAPH),
("w:style{w:type=paragraph}", WD_STYLE_TYPE.PARAGRAPH),
("w:style{w:type=character}", WD_STYLE_TYPE.CHARACTER),
("w:style{w:type=numbering}", WD_STYLE_TYPE.LIST),
]
)
def type_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", False),
("w:style/w:unhideWhenUsed", True),
("w:style/w:unhideWhenUsed{w:val=0}", False),
("w:style/w:unhideWhenUsed{w:val=1}", True),
]
)
def unhide_get_fixture(self, request):
style_cxml, expected_value = request.param
style = BaseStyle(element(style_cxml))
return style, expected_value
@pytest.fixture(
params=[
("w:style", True, "w:style/w:unhideWhenUsed"),
("w:style/w:unhideWhenUsed", False, "w:style"),
("w:style/w:unhideWhenUsed{w:val=0}", True, "w:style/w:unhideWhenUsed"),
("w:style/w:unhideWhenUsed{w:val=1}", True, "w:style/w:unhideWhenUsed"),
("w:style/w:unhideWhenUsed{w:val=1}", False, "w:style"),
("w:style", False, "w:style"),
]
)
def unhide_set_fixture(self, request):
style_cxml, value, expected_cxml = request.param
style = BaseStyle(element(style_cxml))
expected_xml = xml(expected_cxml)
return style, value, expected_xml
| DescribeBaseStyle |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 36772,
"end": 37407
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, access_token: str, page_id: str):
"""Airbyte Source for Facebook Pages.
Documentation can be found at https://docs.airbyte.com/integrations/sources/facebook-pages
Args:
name (str): The name of the destination.
access_token (str): Facebook Page Access Token
page_id (str): Page ID
"""
self.access_token = check.str_param(access_token, "access_token")
self.page_id = check.str_param(page_id, "page_id")
super().__init__("Facebook Pages", name)
| FacebookPagesSource |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 63264,
"end": 63514
} | class ____(ActionTool):
''' A tool that allows to inspect and configure a model. '''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| ExamineTool |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 50835,
"end": 51749
} | class ____(Interface):
"""
Describes an :term:`asset`.
"""
def absspec():
"""
Returns the absolute asset specification for this asset
(e.g. ``mypackage:templates/foo.pt``).
"""
def abspath():
"""
Returns an absolute path in the filesystem to the asset.
"""
def stream():
"""
Returns an input stream for reading asset contents. Raises an
exception if the asset is a directory or does not exist.
"""
def isdir():
"""
Returns True if the asset is a directory, otherwise returns False.
"""
def listdir():
"""
Returns iterable of filenames of directory contents. Raises an
exception if asset is not a directory.
"""
def exists():
"""
Returns True if asset exists, otherwise returns False.
"""
| IAssetDescriptor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 10297,
"end": 12891
} | class ____(GithubStreamABC):
def __init__(self, repositories: List[str], page_size_for_large_streams: int, **kwargs):
super().__init__(**kwargs)
self.repositories = repositories
# GitHub pagination could be from 1 to 100.
# This parameter is deprecated and in future will be used sane default, page_size: 10
self.page_size = page_size_for_large_streams if self.large_stream else constants.DEFAULT_PAGE_SIZE
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"repos/{stream_slice['repository']}/{self.name}"
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
for repository in self.repositories:
yield {"repository": repository}
def get_error_display_message(self, exception: BaseException) -> Optional[str]:
if (
isinstance(exception, DefaultBackoffException)
and exception.response.status_code == requests.codes.BAD_GATEWAY
and self.large_stream
and self.page_size > 1
):
return f'Please try to decrease the "Page size for large streams" below {self.page_size}. The stream "{self.name}" is a large stream, such streams can fail with 502 for high "page_size" values.'
return super().get_error_display_message(exception)
def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]:
record["repository"] = stream_slice["repository"]
if "reactions" in record and record["reactions"]:
reactions = record["reactions"]
if "+1" in reactions:
reactions["plus_one"] = reactions.pop("+1")
if "-1" in reactions:
reactions["minus_one"] = reactions.pop("-1")
return record
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if is_conflict_with_empty_repository(response):
# I would expect that this should be handled (skipped) by the error handler, but it seems like
# ignored this error but continue to processing records. This may be fixed in latest CDK versions.
return
yield from super().parse_response(
response=response,
stream_state=stream_state,
stream_slice=stream_slice,
next_page_token=next_page_token,
)
| GithubStream |
python | wandb__wandb | tests/unit_tests/test_ssl.py | {
"start": 279,
"end": 3590
} | class ____:
ca_path: Path
cert: Path
key: Path
@pytest.fixture(scope="session")
def ssl_creds(assets_path: Callable[[str], Path]) -> SSLCredPaths:
ca_path = assets_path("ssl_certs")
# don't hardcode the cert's filename, which has to be the hash of the cert
[cert_path] = ca_path.glob("*.0")
return SSLCredPaths(
ca_path=ca_path,
cert=cert_path,
key=ca_path / "localhost.key",
)
@pytest.fixture(scope="session")
def ssl_server(ssl_creds: SSLCredPaths) -> Iterator[http.server.HTTPServer]:
class MyServer(http.server.BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def do_GET(self): # noqa: N802
body = b"Hello, world!"
self.send_response(200)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.send_header("Content-Length", str(len(body)))
self.send_header("Connection", "close")
self.end_headers()
self.wfile.write(body)
self.wfile.flush()
httpd = http.server.HTTPServer(("localhost", 0), MyServer)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile=str(ssl_creds.cert), keyfile=str(ssl_creds.key))
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
ready_event = threading.Event()
def serve_with_signal():
ready_event.set()
httpd.serve_forever()
server_thread = threading.Thread(target=serve_with_signal, daemon=True)
server_thread.start()
# Wait for server to signal it's ready
ready_event.wait(timeout=2.0)
yield httpd
httpd.shutdown()
@pytest.mark.parametrize(
["env", "expect_disabled"],
[
({}, False),
({"WANDB_INSECURE_DISABLE_SSL": ""}, False),
({"WANDB_INSECURE_DISABLE_SSL": "false"}, False),
({"WANDB_INSECURE_DISABLE_SSL": "true"}, True),
],
)
def test_check_ssl_disabled(
env: Mapping[str, str],
expect_disabled: bool,
):
with patch.dict("os.environ", env):
assert expect_disabled == wandb.env.ssl_disabled()
@contextlib.contextmanager
def disable_ssl_context():
reset = wandb.apis._disable_ssl()
try:
yield
finally:
reset()
def test_disable_ssl(
ssl_server: http.server.HTTPServer,
):
url = f"https://{ssl_server.server_address[0]}:{ssl_server.server_address[1]}"
with pytest.raises(requests.exceptions.SSLError):
requests.get(url)
with disable_ssl_context():
with requests.get(url, stream=True) as resp:
assert resp.status_code == 200
@pytest.mark.parametrize(
"make_env",
[
lambda certpath: {"REQUESTS_CA_BUNDLE": str(certpath)},
lambda certpath: {"REQUESTS_CA_BUNDLE": str(certpath.parent)},
],
)
def test_uses_userspecified_custom_ssl_certs(
ssl_creds: SSLCredPaths,
ssl_server: http.server.HTTPServer,
make_env: Callable[[Path], Mapping[str, str]],
):
url = f"https://{ssl_server.server_address[0]}:{ssl_server.server_address[1]}"
with pytest.raises(requests.exceptions.SSLError):
requests.get(url)
with patch.dict("os.environ", make_env(ssl_creds.cert)):
with requests.get(url, stream=True) as resp:
assert resp.status_code == 200
| SSLCredPaths |
python | django__django | tests/admin_filters/tests.py | {
"start": 3374,
"end": 3981
} | class ____(SimpleListFilter):
title = "department"
parameter_name = "department"
def lookups(self, request, model_admin):
return sorted(
{
(
employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code,
)
for employee in model_admin.get_queryset(request)
}
)
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
| DepartmentListFilterLookupWithNonStringValue |
python | ipython__ipython | IPython/core/historyapp.py | {
"start": 4518,
"end": 5163
} | class ____(HistoryTrim):
description = clear_hist_help
keep = Int(0, help="Number of recent lines to keep in the database.")
force = Bool(False, help="Don't prompt user for confirmation").tag(config=True)
flags = Dict( # type: ignore
dict(
force=({"HistoryClear": {"force": True}}, force.help),
f=({"HistoryTrim": {"force": True}}, force.help),
)
)
aliases = Dict() # type: ignore
def start(self):
if self.force or ask_yes_no(
"Really delete all ipython history? ", default="no", interrupt="no"
):
HistoryTrim.start(self)
| HistoryClear |
python | scikit-learn__scikit-learn | sklearn/utils/_mocking.py | {
"start": 12570,
"end": 13699
} | class ____(BaseEstimator):
"""Estimator for which we can turn on/off the prediction methods.
Parameters
----------
response_methods: list of \
{"predict", "predict_proba", "decision_function"}, default=None
List containing the response implemented by the estimator. When, the
response is in the list, it will return the name of the response method
when called. Otherwise, an `AttributeError` is raised. It allows to
use `getattr` as any conventional estimator. By default, no response
methods are mocked.
"""
def __init__(self, response_methods=None):
self.response_methods = response_methods
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
@available_if(_check_response("predict"))
def predict(self, X):
return "predict"
@available_if(_check_response("predict_proba"))
def predict_proba(self, X):
return "predict_proba"
@available_if(_check_response("decision_function"))
def decision_function(self, X):
return "decision_function"
| _MockEstimatorOnOffPrediction |
python | pytest-dev__pytest | src/_pytest/outcomes.py | {
"start": 1180,
"end": 1870
} | class ____(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = "builtins"
def __init__(
self,
msg: str | None = None,
pytrace: bool = True,
allow_module_level: bool = False,
*,
_use_item_location: bool = False,
) -> None:
super().__init__(msg=msg, pytrace=pytrace)
self.allow_module_level = allow_module_level
# If true, the skip location is reported as the item's location,
# instead of the place that raises the exception/calls skip().
self._use_item_location = _use_item_location
| Skipped |
python | walkccc__LeetCode | solutions/529. Minesweeper/529.py | {
"start": 0,
"end": 986
} | class ____:
def updateBoard(self, board: list[list[str]],
click: list[int]) -> list[list[str]]:
i, j = click
if board[i][j] == 'M':
board[i][j] = 'X'
return board
DIRS = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
(0, 1), (1, -1), (1, 0), (1, 1))
def getMinesCount(i: int, j: int) -> int:
minesCount = 0
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == len(board) or y < 0 or y == len(board[0]):
continue
if board[x][y] == 'M':
minesCount += 1
return minesCount
def dfs(i: int, j: int) -> None:
if i < 0 or i == len(board) or j < 0 or j == len(board[0]):
return
if board[i][j] != 'E':
return
minesCount = getMinesCount(i, j)
board[i][j] = 'B' if minesCount == 0 else str(minesCount)
if minesCount == 0:
for dx, dy in DIRS:
dfs(i + dx, j + dy)
dfs(i, j)
return board
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/script_ops_test.py | {
"start": 3774,
"end": 5390
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_numpy_arguments(self):
def plus(a, b):
return a + b
actual_result = script_ops.numpy_function(plus, [1, 2], dtypes.int32)
expect_result = constant_op.constant(3, dtypes.int32)
self.assertAllEqual(actual_result, expect_result)
def test_stateless(self):
call_count = 0
def plus(a, b):
nonlocal call_count
call_count += 1
return a + b
@def_function.function
def numpy_func_stateless(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=False)
@def_function.function
def func_stateless(a, b):
sum1 = numpy_func_stateless(a, b)
sum2 = numpy_func_stateless(a, b)
return sum1 + sum2
self.evaluate(func_stateless(
constant_op.constant(1),
constant_op.constant(2),
))
self.assertIn(call_count, (1, 2)) # as stateless, func may be deduplicated
def test_stateful(self):
call_count = 0
def plus(a, b):
nonlocal call_count
call_count += 1
return a + b
@def_function.function
def numpy_func_stateful(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=True)
@def_function.function
def func_stateful(a, b):
sum1 = numpy_func_stateful(a, b)
sum2 = numpy_func_stateful(a, b)
return sum1 + sum2
self.evaluate(func_stateful(
constant_op.constant(1),
constant_op.constant(2),
))
self.assertEqual(call_count,
2) # as stateful, func is guaranteed to execute twice
| NumpyFunctionTest |
python | wandb__wandb | wandb/automations/actions.py | {
"start": 5722,
"end": 6707
} | class ____(_BaseActionInput, GenericWebhookActionInput):
"""Defines an automation action that sends a webhook request."""
action_type: Literal[ActionType.GENERIC_WEBHOOK] = ActionType.GENERIC_WEBHOOK
integration_id: GQLId
"""The ID of the webhook integration that will be used to send the request."""
# overrides the generated field type to parse/serialize JSON strings
request_payload: Optional[JsonEncoded[dict[str, Any]]] = Field( # type: ignore[assignment]
default=None, alias="requestPayload"
)
"""The payload, possibly with template variables, to send in the webhook request."""
@classmethod
def from_integration(
cls,
integration: WebhookIntegration,
*,
payload: Optional[JsonEncoded[dict[str, Any]]] = None,
) -> Self:
"""Define a webhook action that sends to the given (webhook) integration."""
return cls(integration_id=integration.id, request_payload=payload)
| SendWebhook |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 6726,
"end": 6981
} | class ____(PackageTemplate):
"""Provides appropriate overrides for Go-module-based packages"""
base_class_name = "GoPackage"
package_class_import = "from spack_repo.builtin.build_systems.go import GoPackage"
body_def = ""
| GoPackageTemplate |
python | great-expectations__great_expectations | tests/expectations/test_expectation.py | {
"start": 1698,
"end": 10028
} | class ____(ColumnPairMapExpectation):
map_metric = "fake_pair_metric"
@pytest.fixture
def metrics_dict():
"""
Fixture for metrics dict, which represents Metrics already calculated for given Batch
"""
return {
(
"column_values.nonnull.unexpected_count",
"e197e9d84e4f8aa077b8dd5f9042b382",
(),
): "i_exist"
}
def fake_metrics_config_list(
metric_name: str, metric_domain_kwargs: Dict[str, Any]
) -> List[MetricConfiguration]:
"""
Helper method to generate list of MetricConfiguration objects for tests.
"""
return [
MetricConfiguration(
metric_name=metric_name,
metric_domain_kwargs=metric_domain_kwargs,
metric_value_kwargs={},
)
]
def fake_expectation_config(
expectation_type: str, config_kwargs: Dict[str, Any]
) -> ExpectationConfiguration:
"""
Helper method to generate of ExpectationConfiguration objects for tests.
"""
return ExpectationConfiguration(
type=expectation_type,
kwargs=config_kwargs,
)
@pytest.mark.unit
@pytest.mark.parametrize(
"fake_expectation_cls, config",
[
(
FakeMulticolumnExpectation,
fake_expectation_config(
"fake_multicolumn_expectation", {"column_list": ["column_1", "column_2"]}
),
),
(
FakeColumnMapExpectation,
fake_expectation_config("fake_column_map_expectation", {"column": "col"}),
),
(
FakeColumnPairMapExpectation,
fake_expectation_config(
"fake_column_pair_map_expectation",
{"column_A": "colA", "column_B": "colB"},
),
),
],
)
def test_multicolumn_expectation_has_default_mostly(fake_expectation_cls, config):
try:
fake_expectation = fake_expectation_cls(**config.kwargs)
except Exception:
assert False, "Validate configuration threw an error when testing default mostly value"
assert fake_expectation._get_success_kwargs().get("mostly") == 1, (
"Default mostly success ratio is not 1"
)
@pytest.mark.unit
@pytest.mark.parametrize(
"fake_expectation_cls, config",
itertools.chain(
*[
[
(
FakeMulticolumnExpectation,
fake_expectation_config(
"fake_multicolumn_expectation",
{"column_list": ["column_1", "column_2"], "mostly": x},
),
)
for x in [0, 0.5, 1]
],
[
(
FakeColumnMapExpectation,
fake_expectation_config(
"fake_column_map_expectation", {"column": "col", "mostly": x}
),
)
for x in [0, 0.5, 1]
],
[
(
FakeColumnPairMapExpectation,
fake_expectation_config(
"fake_column_pair_map_expectation",
{"column_A": "colA", "column_B": "colB", "mostly": x},
),
)
for x in [0, 0.5, 1]
],
]
),
)
def test_expectation_succeeds_with_valid_mostly(fake_expectation_cls, config):
fake_expectation = fake_expectation_cls(**config.kwargs)
assert fake_expectation._get_success_kwargs().get("mostly") == config.kwargs["mostly"], (
"Default mostly success ratio is not 1"
)
@pytest.mark.unit
@pytest.mark.parametrize(
"fake_expectation_cls, config",
[
(
FakeMulticolumnExpectation,
fake_expectation_config(
"fake_multicolumn_expectation",
{"column_list": ["column_1", "column_2"], "mostly": -0.5},
),
),
(
FakeColumnMapExpectation,
fake_expectation_config(
"fake_column_map_expectation", {"column": "col", "mostly": 1.5}
),
),
(
FakeColumnPairMapExpectation,
fake_expectation_config(
"fake_column_pair_map_expectation",
{"column_A": "colA", "column_B": "colB", "mostly": -1},
),
),
],
)
def test_multicolumn_expectation_validation_errors_with_bad_mostly(fake_expectation_cls, config):
with pytest.raises(pydantic.ValidationError):
fake_expectation_cls(**config)
@pytest.mark.unit
def test_validate_dependencies_against_available_metrics_success(metrics_dict):
metric_config_list: List[MetricConfiguration] = fake_metrics_config_list(
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs={
"batch_id": "projects-projects",
"column": "i_exist",
},
)
_validate_dependencies_against_available_metrics(
validation_dependencies=metric_config_list,
metrics=metrics_dict,
)
@pytest.mark.unit
def test_validate_dependencies_against_available_metrics_failure(metrics_dict):
metric_config_list: List[MetricConfiguration] = fake_metrics_config_list(
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs={
"batch_id": "projects-projects",
"column": "i_dont_exist",
},
)
with pytest.raises(InvalidExpectationConfigurationError):
_validate_dependencies_against_available_metrics(
validation_dependencies=metric_config_list,
metrics=metrics_dict,
)
@pytest.mark.unit
def test_expectation_configuration_property():
expectation = gxe.ExpectColumnMaxToBeBetween(column="foo", min_value=0, max_value=10)
assert expectation.configuration == ExpectationConfiguration(
type="expect_column_max_to_be_between",
kwargs={
"column": "foo",
"min_value": 0,
"max_value": 10,
},
)
@pytest.mark.unit
def test_expectation_configuration_window():
expectation = gxe.ExpectColumnMaxToBeBetween(
column="foo",
min_value=0,
max_value=10,
windows=[
Window(
constraint_fn="a",
parameter_name="b",
range=5,
offset=Offset(positive=0.2, negative=0.2),
strict=True,
)
],
)
assert expectation.configuration == ExpectationConfiguration(
type="expect_column_max_to_be_between",
kwargs={
"column": "foo",
"min_value": 0,
"max_value": 10,
"windows": [
{
"constraint_fn": "a",
"parameter_name": "b",
"range": 5,
"offset": {"positive": 0.2, "negative": 0.2},
"strict": True,
}
],
},
)
@pytest.mark.unit
def test_expectation_configuration_window_empty():
expectation = gxe.ExpectColumnMaxToBeBetween(
column="foo",
min_value=0,
max_value=10,
windows=None,
)
assert expectation.configuration == ExpectationConfiguration(
type="expect_column_max_to_be_between",
kwargs={
"column": "foo",
"min_value": 0,
"max_value": 10,
},
)
@pytest.mark.unit
def test_expectation_configuration_property_recognizes_state_changes():
expectation = gxe.ExpectColumnMaxToBeBetween(column="foo", min_value=0, max_value=10)
expectation.column = "bar"
expectation.min_value = 5
expectation.max_value = 15
assert expectation.configuration == ExpectationConfiguration(
type="expect_column_max_to_be_between",
kwargs={
"column": "bar",
"min_value": 5,
"max_value": 15,
},
)
@pytest.mark.unit
def test_unrecognized_expectation_arg_raises_error():
with pytest.raises(pydantic.ValidationError, match="extra fields not permitted"):
gxe.ExpectColumnMaxToBeBetween(
column="foo",
min_value=0,
max_value=10,
mostyl=0.95, # 'mostly' typo
)
| FakeColumnPairMapExpectation |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_root_cause_analysis.py | {
"start": 5452,
"end": 7157
} | class ____(OrganizationEventsEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request, organization):
# TODO: Extract this into a custom serializer to handle validation
transaction_name = request.GET.get("transaction")
project_id = request.GET.get("project")
regression_breakpoint = request.GET.get("breakpoint")
limit = int(request.GET.get("per_page", DEFAULT_LIMIT))
span_score_threshold = int(
request.GET.get("span_score_threshold", SPAN_ANALYSIS_SCORE_THRESHOLD)
)
if not transaction_name or not project_id or not regression_breakpoint:
# Project ID is required to ensure the events we query for are
# the same transaction
return Response(status=400)
regression_breakpoint = parse_datetime_string(regression_breakpoint)
snuba_params = self.get_snuba_params(request, organization)
with handle_query_errors():
transaction_count_query = metrics_query(
["count()"],
f'event.type:transaction transaction:"{transaction_name}"',
referrer=BASE_REFERRER,
snuba_params=snuba_params,
)
if transaction_count_query["data"][0]["count"] == 0:
return Response(status=400, data="Transaction not found")
results = fetch_span_analysis_results(
transaction_name,
regression_breakpoint,
snuba_params,
project_id,
limit,
span_score_threshold,
)
return Response(results, status=200)
| OrganizationEventsRootCauseAnalysisEndpoint |
python | mamba-org__mamba | libmambapy-stubs/setup.py | {
"start": 115,
"end": 1314
} | class ____(setuptools.command.build_py.build_py):
def run(self):
"""Generate stub files."""
options = mypy.stubgen.Options(
pyversion=sys.version_info[:2],
no_import=False,
inspect=True,
doc_dir="",
search_path=[],
interpreter=sys.executable,
parse_only=False,
ignore_errors=False,
include_private=False,
output_dir=self.build_lib,
modules=[],
packages=["libmambapy", "libmambapy.bindings"],
files=[],
verbose=False,
quiet=False,
export_less=False,
include_docstrings=False,
)
mypy.stubgen.generate_stubs(options)
os.rename(
src=os.path.join(self.build_lib, "libmambapy"),
dst=os.path.join(self.build_lib, "libmambapy-stubs"),
)
super().run()
setuptools.setup(
name="libmambapy-stubs",
version=libmambapy.__version__,
install_requires=[f"libmambapy=={libmambapy.__version__}"],
packages=["src"],
package_data={"libmambapy-stubs": ["**/*.pyi"]},
cmdclass={"build_py": build_py},
)
| build_py |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1118840,
"end": 1119492
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'converted_note_to_issue' event on a given issue or
pull request.
"""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "database_id")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
| ConvertedNoteToIssueEvent |
python | pallets__werkzeug | src/werkzeug/datastructures/mixins.py | {
"start": 1932,
"end": 3917
} | class ____(t.Generic[K, V]):
"""Makes a :class:`dict` immutable.
.. versionchanged:: 3.1
Disallow ``|=`` operator.
.. versionadded:: 0.5
:private:
"""
_hash_cache: int | None = None
@classmethod
@t.overload
def fromkeys(
cls, keys: cabc.Iterable[K], value: None
) -> ImmutableDictMixin[K, t.Any | None]: ...
@classmethod
@t.overload
def fromkeys(cls, keys: cabc.Iterable[K], value: V) -> ImmutableDictMixin[K, V]: ...
@classmethod
def fromkeys(
cls, keys: cabc.Iterable[K], value: V | None = None
) -> ImmutableDictMixin[K, t.Any | None] | ImmutableDictMixin[K, V]:
instance = super().__new__(cls)
instance.__init__(zip(keys, repeat(value))) # type: ignore[misc]
return instance
def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
return type(self), (dict(self),) # type: ignore[call-overload]
def _iter_hashitems(self) -> t.Iterable[t.Any]:
return self.items() # type: ignore[attr-defined,no-any-return]
def __hash__(self) -> int:
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key: t.Any, default: t.Any = None) -> t.NoReturn:
_immutable_error(self)
def update(self, arg: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
_immutable_error(self)
def __ior__(self, other: t.Any) -> t.NoReturn:
_immutable_error(self)
def pop(self, key: t.Any, default: t.Any = None) -> t.NoReturn:
_immutable_error(self)
def popitem(self) -> t.NoReturn:
_immutable_error(self)
def __setitem__(self, key: t.Any, value: t.Any) -> t.NoReturn:
_immutable_error(self)
def __delitem__(self, key: t.Any) -> t.NoReturn:
_immutable_error(self)
def clear(self) -> t.NoReturn:
_immutable_error(self)
| ImmutableDictMixin |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 52341,
"end": 52658
} | class ____(sgqlc.types.Enum):
"""The possible organization invitation types.
Enumeration Choices:
* `EMAIL`: The invitation was to an email address.
* `USER`: The invitation was to an existing user.
"""
__schema__ = github_schema
__choices__ = ("EMAIL", "USER")
| OrganizationInvitationType |
python | ZoranPandovski__al-go-rithms | games/minimax/TicTacToe_AI.py | {
"start": 593,
"end": 5478
} | class ____(object):
def __init__(self, board):
self.board = board
# printing current board...
count = 0
print()
for each in board:
count += 1
if(count % 3 != 0):
print(board[each] , end=" | ")
else:
print(board[each])
if(count != 9):
print('-'*10)
def isSpacefree(self, position):
if(position in range(10) and board[position] == " "):
return True
else:
return False
def checkWin(self, board):
# row
if(board[1] == board[2] == board[3] != " "):
return [True, board[1]]
elif(board[4] == board[5] == board[6] != " "):
return [True, board[4]]
elif(board[7] == board[8] == board[9] != " "):
return [True, board[7]]
# column
elif(board[1] == board[4] == board[7] != " "):
return [True, board[1]]
elif(board[2] == board[5] == board[8] != " "):
return [True, board[2]]
elif(board[3] == board[6] == board[9] != " "):
return [True, board[3]]
# diagonal
elif(board[1] == board[5] == board[9] != " "):
return [True, board[1]]
elif(board[3] == board[5] == board[7] != " "):
return [True, board[3]]
else:
return [False]
# if board is full and no winner..
def checkDraw(self, board):
# if board is fully filled...
if(not any([True if board[x] == ' ' else False for x in board])):
return True
return False
def insertPosition(self, position, letter = 'O'):
if(self.isSpacefree(position)):
board[position] = letter
self.__init__(board)
res = self.checkWin(board)
if(res[0]):
print(f' Player {res[1]} won ')
exit()
# if board is fully filled and none is winner then it is a draw...
if(not any([True if board[x] == ' ' else False for x in board]) and not res[0]):
print(" *** Draw *** ")
exit()
else:
position = int(input(" ==>> Space is either not free or NOT valid choose valid position.. "))
self.insertPosition(position)
def playerMove(self, letter='O'):
position = int(input(" You move - Choose Position : "))
self.insertPosition(position, letter)
def MiniMax(self, board, depth, isMaximizing):
# check for base conditions who won..
winner = self.checkWin(board)
if(winner[0]):
# 'X' = bot, 'O' = player
# return 1 if(winner[1] == 'X') else -1
empty_spaces = [1 if board[x] == ' ' else 0 for x in board].count(1)
if(winner[1] == 'X'):
return 1*(empty_spaces + 1)
else:
return -1*(empty_spaces + 1)
elif(self.checkDraw(board)):
return 0
if(isMaximizing):
bestScore = -infinity
for each in board:
if(board[each] == " "):
board[each] = 'X'
# calling the minimax function for letter 'O'
score = self.MiniMax(board, 0, False)
# undo the move for next iter check
board[each] = " "
bestScore = max(score, bestScore)
return bestScore
else:
bestScore = infinity
for each in board:
if(board[each] == " "):
board[each] = 'O'
# calling the minimax function with letter 'X' for maximization
score = self.MiniMax(board, 0, True)
# undo the move for next iter check
board[each] = " "
bestScore = min(score, bestScore)
return bestScore
def computerMove(self, letter='X'):
print(" Computer Playing his move..")
bestMove = 0
bestScore = -infinity
for each in board:
if(board[each] == " "):
board[each] = letter
# calling the minimax function for letter 'O'
score = self.MiniMax(board, 0, False)
# undo the move for next iter check
board[each] = " "
# maxEval = max(eval, maxEval)
if(score > bestScore):
bestScore = score
bestMove = each
self.insertPosition(bestMove, letter)
return
demoBoard(board)
obj = TicTacToe(board)
# loop will run until game is not over..
while(True):
obj.computerMove('X')
obj.playerMove('O')
# end
| TicTacToe |
python | joke2k__faker | faker/providers/phone_number/de_CH/__init__.py | {
"start": 49,
"end": 1594
} | class ____(PhoneNumberProvider):
"""Phone number provider for `de_CH` locale.
Sources:
- https://de.wikipedia.org/wiki/Telefonnummer_(Schweiz)
"""
dialing_codes = (
"75",
"76",
"77",
"78",
"79",
)
landline_codes = (
"21",
"22",
"24",
"26",
"27",
"31",
"32",
"33",
"34",
"43",
"41",
"44",
"52",
"55",
"56",
"61",
"62",
"71",
"81",
"91",
)
cellphone_formats = (
"+41 {{dialing_code}} ### ## ##",
"0{{dialing_code}} ### ## ##",
)
landline_formats = (
"+41 {{landline_code}} ### ## ##",
"0{{landline_code}} ### ## ##",
)
"""
Get dialing code for cellphone numbers.
"""
def dialing_code(self) -> str:
return self.random_element(self.dialing_codes)
"""
Get dialing code for landlines.
"""
def landline_code(self) -> str:
return self.random_element(self.landline_codes)
"""
Get a landline phone number.
"""
def phone_number(self) -> str:
pattern: str = self.random_element(self.landline_formats)
return self.numerify(self.generator.parse(pattern))
"""
Get a cellphone number.
"""
def cellphone_number(self) -> str:
pattern: str = self.random_element(self.cellphone_formats)
return self.numerify(self.generator.parse(pattern))
| Provider |
python | aio-libs__aiohttp | aiohttp/helpers.py | {
"start": 2615,
"end": 7074
} | class ____(namedtuple("BasicAuth", ["login", "password", "encoding"])):
"""Http basic authentication helper."""
def __new__(
cls, login: str, password: str = "", encoding: str = "latin1"
) -> "BasicAuth":
if login is None:
raise ValueError("None is not allowed as login value")
if password is None:
raise ValueError("None is not allowed as password value")
if ":" in login:
raise ValueError('A ":" is not allowed in login (RFC 1945#section-11.1)')
return super().__new__(cls, login, password, encoding)
@classmethod
def decode(cls, auth_header: str, encoding: str = "latin1") -> "BasicAuth":
"""Create a BasicAuth object from an Authorization HTTP header."""
try:
auth_type, encoded_credentials = auth_header.split(" ", 1)
except ValueError:
raise ValueError("Could not parse authorization header.")
if auth_type.lower() != "basic":
raise ValueError("Unknown authorization method %s" % auth_type)
try:
decoded = base64.b64decode(
encoded_credentials.encode("ascii"), validate=True
).decode(encoding)
except binascii.Error:
raise ValueError("Invalid base64 encoding.")
try:
# RFC 2617 HTTP Authentication
# https://www.ietf.org/rfc/rfc2617.txt
# the colon must be present, but the username and password may be
# otherwise blank.
username, password = decoded.split(":", 1)
except ValueError:
raise ValueError("Invalid credentials.")
return cls(username, password, encoding=encoding)
@classmethod
def from_url(cls, url: URL, *, encoding: str = "latin1") -> Optional["BasicAuth"]:
"""Create BasicAuth from url."""
if not isinstance(url, URL):
raise TypeError("url should be yarl.URL instance")
# Check raw_user and raw_password first as yarl is likely
# to already have these values parsed from the netloc in the cache.
if url.raw_user is None and url.raw_password is None:
return None
return cls(url.user or "", url.password or "", encoding=encoding)
def encode(self) -> str:
"""Encode credentials."""
creds = (f"{self.login}:{self.password}").encode(self.encoding)
return "Basic %s" % base64.b64encode(creds).decode(self.encoding)
def strip_auth_from_url(url: URL) -> tuple[URL, BasicAuth | None]:
"""Remove user and password from URL if present and return BasicAuth object."""
# Check raw_user and raw_password first as yarl is likely
# to already have these values parsed from the netloc in the cache.
if url.raw_user is None and url.raw_password is None:
return url, None
return url.with_user(None), BasicAuth(url.user or "", url.password or "")
def netrc_from_env() -> netrc.netrc | None:
"""Load netrc from file.
Attempt to load it from the path specified by the env-var
NETRC or in the default location in the user's home directory.
Returns None if it couldn't be found or fails to parse.
"""
netrc_env = os.environ.get("NETRC")
if netrc_env is not None:
netrc_path = Path(netrc_env)
else:
try:
home_dir = Path.home()
except RuntimeError as e:
# if pathlib can't resolve home, it may raise a RuntimeError
client_logger.debug(
"Could not resolve home directory when "
"trying to look for .netrc file: %s",
e,
)
return None
netrc_path = home_dir / (
"_netrc" if platform.system() == "Windows" else ".netrc"
)
try:
return netrc.netrc(str(netrc_path))
except netrc.NetrcParseError as e:
client_logger.warning("Could not parse .netrc file: %s", e)
except OSError as e:
netrc_exists = False
with contextlib.suppress(OSError):
netrc_exists = netrc_path.is_file()
# we couldn't read the file (doesn't exist, permissions, etc.)
if netrc_env or netrc_exists:
# only warn if the environment wanted us to load it,
# or it appears like the default file does actually exist
client_logger.warning("Could not read .netrc file: %s", e)
return None
@frozen_dataclass_decorator
| BasicAuth |
python | getsentry__sentry | src/sentry/api/fields/avatar.py | {
"start": 416,
"end": 551
} | class ____(SentryAPIException):
status_code = 413
default_detail = "Image too large"
default_code = "too_large"
| ImageTooLarge |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.