language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_sampling_project_span_counts.py | {
"start": 484,
"end": 7054
} | class ____(MetricsEnhancedPerformanceTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.project_1 = self.create_project(organization=self.org, name="project_1")
self.project_2 = self.create_project(organization=self.org, name="project_2")
self.project_3 = self.create_project(organization=self.org, name="project_3")
self.project_4 = self.create_project(organization=self.org, name="project_4")
self.url = reverse(
"sentry-api-0-organization-sampling-root-counts",
kwargs={"organization_id_or_slug": self.org.slug},
)
metric_data = (
(self.project_1.id, self.project_2.id, 12),
(self.project_1.id, self.project_3.id, 13),
(self.project_2.id, self.project_1.id, 21),
)
hour_ago = self.MOCK_DATETIME - timedelta(hours=1)
days_ago = self.MOCK_DATETIME - timedelta(days=5)
fifty_days_ago = self.MOCK_DATETIME - timedelta(days=50)
for project_source_id, target_project_id, span_count in metric_data:
self.store_metric(
org_id=self.org.id,
value=span_count,
project_id=int(project_source_id),
mri=SpanMRI.COUNT_PER_ROOT_PROJECT.value,
tags={"target_project_id": str(target_project_id)},
timestamp=int(hour_ago.timestamp()),
)
self.store_metric(
org_id=self.org.id,
value=span_count,
project_id=int(project_source_id),
mri=SpanMRI.COUNT_PER_ROOT_PROJECT.value,
tags={"target_project_id": str(target_project_id)},
timestamp=int(days_ago.timestamp()),
)
self.store_metric(
org_id=self.org.id,
value=span_count,
project_id=int(project_source_id),
mri=SpanMRI.COUNT_PER_ROOT_PROJECT.value,
tags={"target_project_id": str(target_project_id)},
timestamp=int(fifty_days_ago.timestamp()),
)
def test_feature_flag_required(self) -> None:
response = self.client.get(self.url)
assert response.status_code == 404
@django_db_all
def test_get_span_counts_without_permission(self) -> None:
user = self.create_user()
self.login_as(user)
with self.feature("organizations:dynamic-sampling-custom"):
response = self.client.get(
self.url,
data={"statsPeriod": "24h"},
)
assert response.status_code == 403
@django_db_all
def test_get_span_counts_with_ingested_data_24h(self) -> None:
"""Test span counts endpoint with actual ingested metrics data"""
with self.feature("organizations:dynamic-sampling-custom"):
response = self.client.get(
self.url,
data={"statsPeriod": "24h"},
)
assert response.status_code == 200
data = response.data # type: ignore[attr-defined]
span_counts = sorted(data["data"][0], key=lambda x: x["by"]["target_project_id"])
assert span_counts[0]["by"]["project"] == self.project_2.name
assert span_counts[0]["by"]["target_project_id"] == str(self.project_1.id)
assert span_counts[0]["totals"] == 21.0
assert span_counts[1]["by"]["project"] == self.project_1.name
assert span_counts[1]["by"]["target_project_id"] == str(self.project_2.id)
assert span_counts[1]["totals"] == 12.0
assert span_counts[2]["by"]["project"] == self.project_1.name
assert span_counts[2]["by"]["target_project_id"] == str(self.project_3.id)
assert span_counts[2]["totals"] == 13.0
assert data["end"] == MetricsEnhancedPerformanceTestCase.MOCK_DATETIME
assert (data["end"] - data["start"]) == timedelta(days=1)
@django_db_all
def test_get_span_counts_with_ingested_data_30d(self) -> None:
with self.feature("organizations:dynamic-sampling-custom"):
response = self.client.get(
self.url,
data={"statsPeriod": "30d"},
)
assert response.status_code == 200
data = response.data # type: ignore[attr-defined]
span_counts = sorted(data["data"][0], key=lambda x: x["by"]["target_project_id"])
assert span_counts[0]["by"]["project"] == self.project_2.name
assert span_counts[0]["by"]["target_project_id"] == str(self.project_1.id)
assert span_counts[0]["totals"] == 21.0 * 2
assert span_counts[1]["by"]["project"] == self.project_1.name
assert span_counts[1]["by"]["target_project_id"] == str(self.project_2.id)
assert span_counts[1]["totals"] == 12.0 * 2
assert span_counts[2]["by"]["project"] == self.project_1.name
assert span_counts[2]["by"]["target_project_id"] == str(self.project_3.id)
assert span_counts[2]["totals"] == 13.0 * 2
assert data["end"] == MetricsEnhancedPerformanceTestCase.MOCK_DATETIME
assert (data["end"] - data["start"]) == timedelta(days=30)
@django_db_all
def test_get_span_counts_with_many_projects(self) -> None:
# Create 200 projects with incrementing span counts
projects = []
days_ago = self.MOCK_DATETIME - timedelta(days=5)
for i in range(200):
project = self.create_project(organization=self.org, name=f"gen_project_{i}")
projects.append(project)
self.store_metric(
org_id=self.org.id,
value=i,
project_id=int(project.id),
mri=SpanMRI.COUNT_PER_ROOT_PROJECT.value,
tags={"target_project_id": str(self.project_1.id)},
timestamp=int(days_ago.timestamp()),
)
with self.feature("organizations:dynamic-sampling-custom"):
response = self.client.get(
self.url,
data={"statsPeriod": "30d"},
)
assert response.status_code == 200
data = response.data # type: ignore[attr-defined]
span_counts = sorted(data["data"][0], key=lambda x: x["totals"], reverse=True)
# Verify we get all 200 projects back
assert len(span_counts) >= 200
@freeze_time(MetricsEnhancedPerformanceTestCase.MOCK_DATETIME)
@region_silo_test
| OrganizationSamplingProjectSpanCountsTest |
python | Netflix__metaflow | metaflow/util.py | {
"start": 1277,
"end": 21035
} | class ____(object):
# Provide a temporary directory since Python 2.7 does not have it inbuilt
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name)
def cached_property(getter):
@wraps(getter)
def exec_once(self):
saved_name = "__%s" % getter.__name__
if not hasattr(self, saved_name):
setattr(self, saved_name, getter(self))
return getattr(self, saved_name)
return property(exec_once)
def all_equal(it):
"""
Return True if all elements of the given iterator are equal.
"""
it = iter(it)
try:
first = next(it)
except StopIteration:
return True
for x in it:
if x != first:
return False
return True
def url_quote(url):
"""
Encode a unicode URL to a safe byte string
"""
# quote() works reliably only with (byte)strings in Python2,
# hence we need to .encode('utf-8') first. To see by yourself,
# try quote(u'\xff') in python2. Python3 converts the output
# always to Unicode, hence we need the outer to_bytes() too.
#
# We mark colon as a safe character to keep simple ASCII urls
# nice looking, e.g. "http://google.com"
return to_bytes(quote(to_bytes(url), safe="/:"))
def url_unquote(url_bytes):
"""
Decode a byte string encoded with url_quote to a unicode URL
"""
return unquote_bytes(url_bytes)
def is_stringish(x):
"""
Returns true if the object is a unicode or a bytes object
"""
return isinstance(x, bytes_type) or isinstance(x, unicode_type)
def to_fileobj(x):
"""
Convert any string-line object to a byte-returning fileobj
"""
return BytesIO(to_bytes(x))
def to_unicode(x):
"""
Convert any object to a unicode object
"""
if isinstance(x, bytes_type):
return x.decode("utf-8")
else:
return unicode_type(x)
def to_bytes(x):
"""
Convert any object to a byte string
"""
if isinstance(x, unicode_type):
return x.encode("utf-8")
elif isinstance(x, bytes_type):
return x
elif isinstance(x, float):
return repr(x).encode("utf-8")
else:
return str(x).encode("utf-8")
def get_username():
"""
Return the name of the current user, or None if the current user
could not be determined.
"""
from metaflow.metaflow_config import USER
# note: the order of the list matters
ENVVARS = ["SUDO_USER", "USERNAME", "USER"]
for user in [USER] + [os.environ.get(x) for x in ENVVARS]:
if user and user != "root":
return user
return None
def resolve_identity_as_tuple():
from metaflow.exception import MetaflowUnknownUser
prod_token = os.environ.get("METAFLOW_PRODUCTION_TOKEN")
if prod_token:
return "production", prod_token
user = get_username()
if user and user != "root":
return "user", user
else:
raise MetaflowUnknownUser()
def resolve_identity():
identity_type, identity_value = resolve_identity_as_tuple()
return "%s:%s" % (identity_type, identity_value)
def parse_spin_pathspec(pathspec: str, flow_name: str) -> Tuple:
"""
Parse various pathspec formats for the spin command.
Parameters
----------
pathspec : str
The pathspec string in one of the following formats:
- step_name (e.g., 'start')
- run_id/step_name (e.g., '221165/start')
- run_id/step_name/task_id (e.g., '221165/start/1350987')
- flow_name/run_id/step_name (e.g., 'ScalableFlow/221165/start')
- flow_name/run_id/step_name/task_id (e.g., 'ScalableFlow/221165/start/1350987')
flow_name : str
The name of the current flow.
Returns
-------
Tuple
A tuple of (step_name, full_pathspec_or_none)
Raises
------
CommandException
If the pathspec format is invalid or flow name doesn't match.
"""
from .exception import CommandException
parts = pathspec.split("/")
if len(parts) == 1:
# Just step name: 'start'
step_name = parts[0]
parsed_pathspec = None
elif len(parts) == 2:
# run_id/step_name: '221165/start'
run_id, step_name = parts
parsed_pathspec = f"{flow_name}/{run_id}/{step_name}"
elif len(parts) == 3:
# Could be run_id/step_name/task_id or flow_name/run_id/step_name
if parts[0] == flow_name:
# flow_name/run_id/step_name
_, run_id, step_name = parts
parsed_pathspec = f"{flow_name}/{run_id}/{step_name}"
else:
# run_id/step_name/task_id
run_id, step_name, task_id = parts
parsed_pathspec = f"{flow_name}/{run_id}/{step_name}/{task_id}"
elif len(parts) == 4:
# flow_name/run_id/step_name/task_id
parsed_flow_name, run_id, step_name, task_id = parts
if parsed_flow_name != flow_name:
raise CommandException(
f"Flow name '{parsed_flow_name}' in pathspec does not match current flow '{flow_name}'."
)
parsed_pathspec = pathspec
else:
raise CommandException(
f"Invalid pathspec format: '{pathspec}'. \n"
"Expected formats:\n"
" - step_name (e.g., 'start')\n"
" - run_id/step_name (e.g., '221165/start')\n"
" - run_id/step_name/task_id (e.g., '221165/start/1350987')\n"
" - flow_name/run_id/step_name (e.g., 'ScalableFlow/221165/start')\n"
" - flow_name/run_id/step_name/task_id (e.g., 'ScalableFlow/221165/start/1350987')"
)
return step_name, parsed_pathspec
def get_latest_task_pathspec(
flow_name: str, step_name: str, run_id: str = None
) -> "metaflow.Task":
"""
Returns a task pathspec from the latest run (or specified run) of the flow for the queried step.
If the queried step has several tasks, the task pathspec of the first task is returned.
Parameters
----------
flow_name : str
The name of the flow.
step_name : str
The name of the step.
run_id : str, optional
The run ID to use. If None, uses the latest run.
Returns
-------
Task
A Metaflow Task instance containing the latest task for the queried step.
Raises
------
MetaflowNotFound
If no task or run is found for the queried step.
"""
from metaflow import Flow, Step
from metaflow.exception import MetaflowNotFound
if not run_id:
flow = Flow(flow_name)
run = flow.latest_run
if run is None:
raise MetaflowNotFound(f"No run found for flow {flow_name}")
run_id = run.id
try:
task = Step(f"{flow_name}/{run_id}/{step_name}").task
return task
except:
raise MetaflowNotFound(f"No task found for step {step_name} in run {run_id}")
def get_latest_run_id(echo, flow_name):
from metaflow.plugins.datastores.local_storage import LocalStorage
local_root = LocalStorage.datastore_root
if local_root is None:
local_root = LocalStorage.get_datastore_root_from_config(
echo, create_on_absent=False
)
if local_root:
path = os.path.join(local_root, flow_name, "latest_run")
if os.path.exists(path):
with open(path) as f:
return f.read()
return None
def write_latest_run_id(obj, run_id):
from metaflow.plugins.datastores.local_storage import LocalStorage
if LocalStorage.datastore_root is None:
LocalStorage.datastore_root = LocalStorage.get_datastore_root_from_config(
obj.echo
)
path = LocalStorage.path_join(LocalStorage.datastore_root, obj.flow.name)
try:
os.makedirs(path)
except OSError as x:
if x.errno != 17:
# Directories exists in other case which is fine
raise
with open(os.path.join(path, "latest_run"), "w") as f:
f.write(str(run_id))
def get_object_package_version(obj):
"""
Return the top level package name and package version that defines the
class of the given object.
"""
try:
module_name = obj.__class__.__module__
if "." in module_name:
top_package_name = module_name.split(".")[0]
else:
top_package_name = module_name
except AttributeError:
return None, None
try:
top_package_version = sys.modules[top_package_name].__version__
return top_package_name, top_package_version
except AttributeError:
return top_package_name, None
def compress_list(lst, separator=",", rangedelim=":", zlibmarker="!", zlibmin=500):
from metaflow.exception import MetaflowInternalError
bad_items = [x for x in lst if separator in x or rangedelim in x or zlibmarker in x]
if bad_items:
raise MetaflowInternalError(
"Item '%s' includes a delimiter character "
"so it can't be compressed" % bad_items[0]
)
# Three output modes:
lcp = longest_common_prefix(lst)
if len(lst) < 2 or not lcp:
# 1. Just a comma-separated list
res = separator.join(lst)
else:
# 2. Prefix and a comma-separated list of suffixes
lcplen = len(lcp)
residuals = [e[lcplen:] for e in lst]
res = rangedelim.join((lcp, separator.join(residuals)))
if len(res) < zlibmin:
return res
else:
# 3. zlib-compressed, base64-encoded, prefix-encoded list
# interestingly, a typical zlib-encoded list of suffixes
# has plenty of redundancy. Decoding the data *twice* helps a
# lot
compressed = zlib.compress(zlib.compress(to_bytes(res)))
return zlibmarker + base64.b64encode(compressed).decode("utf-8")
def decompress_list(lststr, separator=",", rangedelim=":", zlibmarker="!"):
# Three input modes:
if lststr[0] == zlibmarker:
# 3. zlib-compressed, base64-encoded
lstbytes = base64.b64decode(lststr[1:])
decoded = zlib.decompress(zlib.decompress(lstbytes)).decode("utf-8")
else:
decoded = lststr
if rangedelim in decoded:
prefix, suffixes = decoded.split(rangedelim)
# 2. Prefix and a comma-separated list of suffixes
return [prefix + suffix for suffix in suffixes.split(separator)]
else:
# 1. Just a comma-separated list
return decoded.split(separator)
def longest_common_prefix(lst):
if lst:
return "".join(
a for a, _ in takewhile(lambda t: t[0] == t[1], zip(min(lst), max(lst)))
)
else:
return ""
def get_metaflow_root():
return os.path.dirname(os.path.dirname(__file__))
def dict_to_cli_options(params):
# Prevent circular imports
from .user_configs.config_options import ConfigInput
for k, v in params.items():
# Omit boolean options set to false or None, but preserve options with an empty
# string argument.
if v is not False and v is not None:
# we need special handling for 'with' since it is a reserved
# keyword in Python, so we call it 'decospecs' in click args
if k == "decospecs":
k = "with"
if k in ("config", "config_value"):
# Special handling here since we gather them all in one option but actually
# need to send them one at a time using --config-value <name> kv.<name>
# Note it can be either config or config_value depending
# on click processing order.
for config_name in v.keys():
yield "--config-value"
yield to_unicode(config_name)
yield to_unicode(ConfigInput.make_key_name(config_name))
continue
if k == "local_config_file":
# Skip this value -- it should only be used locally and never when
# forming another command line
continue
k = k.replace("_", "-")
v = v if isinstance(v, (list, tuple, set)) else [v]
for value in v:
yield "--%s" % k
if not isinstance(value, bool):
value = to_unicode(value)
# Of the value starts with $, assume the caller wants shell variable
# expansion to happen, so we pass it as is.
# NOTE: We strip '\' to allow for various storages to use escaped
# shell variables as well.
if value.lstrip("\\").startswith("$"):
yield value
else:
# Otherwise, assume it is a literal value and quote it safely
yield _quote(value)
# This function is imported from https://github.com/cookiecutter/whichcraft
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
Note: This function was backported from the Python 3 source code.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
try: # Forced testing
from shutil import which as w
return w(cmd, mode, path)
except ImportError:
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def to_camelcase(obj):
"""
Convert all keys of a json to camel case from snake case.
"""
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, dict):
res = obj.__class__()
for k in obj:
res[re.sub(r"(?!^)_([a-zA-Z])", lambda x: x.group(1).upper(), k)] = (
to_camelcase(obj[k])
)
elif isinstance(obj, (list, set, tuple)):
res = obj.__class__(to_camelcase(v) for v in obj)
else:
return obj
return res
def to_pascalcase(obj):
"""
Convert all keys of a json to pascal case.
"""
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, dict):
res = obj.__class__()
for k in obj:
res[re.sub("([a-zA-Z])", lambda x: x.groups()[0].upper(), k, count=1)] = (
to_pascalcase(obj[k])
)
elif isinstance(obj, (list, set, tuple)):
res = obj.__class__(to_pascalcase(v) for v in obj)
else:
return obj
return res
def tar_safe_extract(tar, path=".", members=None, *, numeric_owner=False):
def is_within_directory(abs_directory, target):
prefix = os.path.commonprefix([abs_directory, os.path.abspath(target)])
return prefix == abs_directory
abs_directory = os.path.abspath(path)
if any(
not is_within_directory(abs_directory, os.path.join(path, member.name))
for member in tar.getmembers()
):
raise Exception("Attempted path traversal in TAR file")
tar.extractall(path, members, numeric_owner=numeric_owner)
def to_pod(value):
"""
Convert a python object to plain-old-data (POD) format.
Parameters
----------
value : Any
Value to convert to POD format. The value can be a string, number, list,
dictionary, or a nested structure of these types.
"""
# Prevent circular imports
from metaflow.parameters import DeployTimeField
if isinstance(value, (str, int, float)):
return value
if isinstance(value, dict):
return {to_pod(k): to_pod(v) for k, v in value.items()}
if isinstance(value, (list, set, tuple)):
return [to_pod(v) for v in value]
if isinstance(value, DeployTimeField):
return value.print_representation
return str(value)
from metaflow._vendor.packaging.version import parse as version_parse
def read_artifacts_module(file_path: str) -> Dict[str, Any]:
"""
Read a Python module from the given file path and return its ARTIFACTS variable.
Parameters
----------
file_path : str
The path to the Python file containing the ARTIFACTS variable.
Returns
-------
Dict[str, Any]
A dictionary containing the ARTIFACTS variable from the module.
Raises
-------
MetaflowInternalError
If the file cannot be read or does not contain the ARTIFACTS variable.
"""
import importlib.util
import os
try:
module_name = os.path.splitext(os.path.basename(file_path))[0]
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
variables = vars(module)
if "ARTIFACTS" not in variables:
raise MetaflowInternalError(
f"Module {file_path} does not contain ARTIFACTS variable"
)
return variables.get("ARTIFACTS")
except Exception as e:
raise MetaflowInternalError(f"Error reading file {file_path}") from e
# this is os.walk(follow_symlinks=True) with cycle detection
def walk_without_cycles(
top_root: str,
exclude_dirs: Optional[List[str]] = None,
) -> Generator[Tuple[str, List[str], List[str]], None, None]:
seen = set()
default_skip_dirs = ["__pycache__"]
def _recurse(root, skip_dirs):
for parent, dirs, files in os.walk(root):
dirs[:] = [d for d in dirs if d not in skip_dirs]
for d in dirs:
path = os.path.join(parent, d)
if os.path.islink(path):
# Breaking loops: never follow the same symlink twice
#
# NOTE: this also means that links to sibling links are
# not followed. In this case:
#
# x -> y
# y -> oo
# oo/real_file
#
# real_file is only included twice, not three times
reallink = os.path.realpath(path)
if reallink not in seen:
seen.add(reallink)
for x in _recurse(path, default_skip_dirs):
yield x
yield parent, dirs, files
skip_dirs = set(default_skip_dirs + (exclude_dirs or []))
for x in _recurse(top_root, skip_dirs):
skip_dirs = default_skip_dirs
yield x
| TempDir |
python | getsentry__sentry | tests/sentry/integrations/jira/test_sentry_issue_details.py | {
"start": 7282,
"end": 12457
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.first_seen = datetime(2015, 8, 13, 3, 8, 25, tzinfo=timezone.utc)
self.last_seen = datetime(2016, 1, 13, 3, 8, 25, tzinfo=timezone.utc)
self.issue_key = "APP-123"
self.path = (
absolute_uri(f"extensions/jira/issue-details/{self.issue_key}/") + "?xdm_e=base_url"
)
self.integration = self.create_provider_integration(
provider="jira",
name="Example Jira",
metadata={
"base_url": "https://getsentry.atlassian.net",
"shared_secret": "a-super-secret-key-from-atlassian",
},
)
with assume_test_silo_mode(SiloMode.REGION, region_name="us"):
self.us_org = self.create_organization(region="us")
self.us_project = Project.objects.create(organization=self.us_org)
self.first_release = self.create_release(
project=self.us_project, version="v1.0", date_added=self.first_seen
)
self.last_release = self.create_release(
project=self.us_project, version="v1.1", date_added=self.last_seen
)
self.us_group = self.create_group(
self.us_project,
message="Sentry Error US",
first_seen=self.first_seen,
last_seen=self.last_seen,
first_release=self.first_release,
)
self.us_external_issue = self.create_integration_external_issue(
group=self.us_group,
integration=self.integration,
key=self.issue_key,
)
with assume_test_silo_mode(SiloMode.REGION, region_name="de"):
self.de_org = self.create_organization(region="de")
self.de_project = Project.objects.create(organization=self.de_org)
self.de_group = self.create_group(
self.de_project,
message="Sentry Error DE",
first_seen=self.first_seen + timedelta(hours=1),
last_seen=self.last_seen + timedelta(hours=1),
)
self.de_external_issue = self.create_integration_external_issue(
self.de_group,
self.integration,
self.issue_key,
)
self.integration.add_organization(self.us_org, self.user)
self.integration.add_organization(self.de_org, self.user)
self.login_as(self.user)
self.properties_key = f"com.atlassian.jira.issue:{JIRA_KEY}:sentry-issues-glance:status"
self.properties_url = "https://getsentry.atlassian.net/rest/api/3/issue/%s/properties/%s"
@patch(
"sentry.integrations.jira.views.sentry_issue_details.get_integration_from_request",
side_effect=ExpiredSignatureError(),
)
def test_expired_signature_error(self, mock_get_integration_from_request: MagicMock) -> None:
response = self.client.get(self.path)
assert response.status_code == 200
assert REFRESH_REQUIRED in response.content
@patch(
"sentry.integrations.jira.views.sentry_issue_details.get_integration_from_request",
side_effect=AtlassianConnectValidationError(),
)
def test_expired_invalid_installation_error(
self, mock_get_integration_from_request: MagicMock
) -> None:
response = self.client.get(self.path)
assert response.status_code == 200
assert UNABLE_TO_VERIFY_INSTALLATION.encode() in response.content
@patch.object(ExternalIssue.objects, "get")
@patch("sentry.integrations.jira.views.sentry_issue_details.get_integration_from_request")
@responses.activate
def test_simple_get(
self,
mock_get_integration_from_request: MagicMock,
mock_get_external_issue: MagicMock,
) -> None:
responses.add(
responses.PUT, self.properties_url % (self.issue_key, self.properties_key), json={}
)
mock_get_external_issue.side_effect = [self.de_external_issue, self.us_external_issue]
mock_get_integration_from_request.return_value = self.integration
response = self.client.get(self.path)
assert response.status_code == 200
resp_content = response.content.decode()
for group in [self.us_group, self.de_group]:
assert group.get_absolute_url() in resp_content
@patch("sentry.integrations.jira.views.sentry_issue_details.get_integration_from_request")
@responses.activate
def test_simple_not_linked(self, mock_get_integration_from_request: MagicMock) -> None:
issue_key = "bad-key"
responses.add(
responses.PUT, self.properties_url % (issue_key, self.properties_key), json={}
)
mock_get_integration_from_request.return_value = self.integration
path = absolute_uri("extensions/jira/issue-details/bad-key/") + "?xdm_e=base_url"
response = self.client.get(path)
assert response.status_code == 200
assert b"This Sentry issue is not linked to a Jira issue" in response.content
| JiraIssueHookControlTest |
python | django-import-export__django-import-export | import_export/resources.py | {
"start": 45116,
"end": 53939
} | class ____(Resource, metaclass=ModelDeclarativeMetaclass):
"""
ModelResource is Resource subclass for handling Django models.
"""
DEFAULT_RESOURCE_FIELD = Field
WIDGETS_MAP = {
"ManyToManyField": "get_m2m_widget",
"OneToOneField": "get_fk_widget",
"ForeignKey": "get_fk_widget",
"CharField": widgets.CharWidget,
"DecimalField": widgets.DecimalWidget,
"DateTimeField": widgets.DateTimeWidget,
"DateField": widgets.DateWidget,
"TimeField": widgets.TimeWidget,
"DurationField": widgets.DurationWidget,
"FloatField": widgets.FloatWidget,
"IntegerField": widgets.IntegerWidget,
"PositiveIntegerField": widgets.IntegerWidget,
"BigIntegerField": widgets.IntegerWidget,
"PositiveBigIntegerField": widgets.IntegerWidget,
"PositiveSmallIntegerField": widgets.IntegerWidget,
"SmallIntegerField": widgets.IntegerWidget,
"SmallAutoField": widgets.IntegerWidget,
"AutoField": widgets.IntegerWidget,
"BigAutoField": widgets.IntegerWidget,
"NullBooleanField": widgets.BooleanWidget,
"BooleanField": widgets.BooleanWidget,
"JSONField": widgets.JSONWidget,
}
@classmethod
def get_m2m_widget(cls, field):
"""
Prepare widget for m2m field
"""
return functools.partial(
widgets.ManyToManyWidget, model=get_related_model(field)
)
@classmethod
def get_fk_widget(cls, field):
"""
Prepare widget for fk and o2o fields
"""
model = get_related_model(field)
use_natural_foreign_keys = (
has_natural_foreign_key(model) and cls._meta.use_natural_foreign_keys
)
return functools.partial(
widgets.ForeignKeyWidget,
model=model,
use_natural_foreign_keys=use_natural_foreign_keys,
)
@classmethod
def widget_from_django_field(cls, f, default=widgets.Widget):
"""
Returns the widget that would likely be associated with each
Django type.
Includes mapping of Postgres Array field. In the case that
psycopg2 is not installed, we consume the error and process the field
regardless.
"""
result = default
internal_type = ""
if callable(getattr(f, "get_internal_type", None)):
internal_type = f.get_internal_type()
widget_result = cls.WIDGETS_MAP.get(internal_type)
if widget_result is not None:
result = widget_result
if isinstance(result, str):
result = getattr(cls, result)(f)
else:
# issue 1804
# The field class may be in a third party library as a subclass
# of a standard field class.
# iterate base classes to determine the correct widget class to use.
for base_class in f.__class__.__mro__:
widget_result = cls.WIDGETS_MAP.get(base_class.__name__)
if widget_result is not None:
result = widget_result
if isinstance(result, str):
result = getattr(cls, result)(f)
break
try:
from django.contrib.postgres.fields import ArrayField
except ImportError:
# ImportError: No module named psycopg2.extras
class ArrayField:
pass
if isinstance(f, ArrayField):
return widgets.SimpleArrayWidget
return result
@classmethod
def widget_kwargs_for_field(cls, field_name, django_field):
"""
Returns widget kwargs for given field_name.
"""
widget_kwargs = {}
if cls._meta.widgets:
cls_kwargs = cls._meta.widgets.get(field_name, {})
widget_kwargs.update(cls_kwargs)
if (
issubclass(django_field.__class__, fields.CharField)
and django_field.blank is True
):
widget_kwargs.update({"coerce_to_string": True, "allow_blank": True})
return widget_kwargs
@classmethod
def field_from_django_field(cls, field_name, django_field, readonly):
"""
Returns a Resource Field instance for the given Django model field.
"""
FieldWidget = cls.widget_from_django_field(django_field)
widget_kwargs = cls.widget_kwargs_for_field(field_name, django_field)
attribute = field_name
column_name = field_name
# To solve #974, #2107
# Check if there's a custom widget configuration for this field
has_custom_widget_declaration = (
cls._meta.widgets and field_name in cls._meta.widgets
)
if (
isinstance(django_field, ForeignKey)
and "__" not in column_name
and not cls._meta.use_natural_foreign_keys
and not has_custom_widget_declaration
):
attribute += "_id"
widget_kwargs["key_is_id"] = True
field = cls.DEFAULT_RESOURCE_FIELD(
attribute=attribute,
column_name=column_name,
widget=FieldWidget(**widget_kwargs),
readonly=readonly,
default=django_field.default,
)
return field
def get_queryset(self):
"""
Returns a queryset of all objects for this model. Override this if you
want to limit the returned queryset.
"""
return self._meta.model.objects.all()
def init_instance(self, row=None):
"""
Initializes a new Django model.
"""
return self._meta.model()
def after_import(self, dataset, result, **kwargs):
"""
Reset the SQL sequences after new objects are imported
"""
# Adapted from django's loaddata
dry_run = self._is_dry_run(kwargs)
if not dry_run and any(
r.import_type == RowResult.IMPORT_TYPE_NEW for r in result.rows
):
db_connection = self.get_db_connection_name()
connection = connections[db_connection]
sequence_sql = connection.ops.sequence_reset_sql(
no_style(), [self._meta.model]
)
if sequence_sql:
cursor = connection.cursor()
try:
for line in sequence_sql:
cursor.execute(line)
finally:
cursor.close()
@classmethod
def get_display_name(cls):
if hasattr(cls._meta, "name"):
return cls._meta.name
return cls.__name__
def modelresource_factory(
model,
resource_class=ModelResource,
meta_options=None,
custom_fields=None,
dehydrate_methods=None,
):
"""
Factory for creating ``ModelResource`` class for given Django model.
This factory function creates a ``ModelResource`` class dynamically, with support
for custom fields, methods.
:param model: Django model class
:param resource_class: Base resource class (default: ModelResource)
:param meta_options: Meta options dictionary
:param custom_fields: Dictionary mapping field names to Field object
:param dehydrate_methods: Dictionary mapping field names
to dehydrate method (Callable)
:returns: ModelResource class
"""
def _create_dehydrate_func_wrapper(func):
def wrapper(self, obj):
return func(obj)
return wrapper
if meta_options is None:
meta_options = {}
if custom_fields is None:
custom_fields = {}
if dehydrate_methods is None:
dehydrate_methods = {}
for field_name, field in custom_fields.items():
if not isinstance(field, Field):
raise ValueError(
f"custom_fields['{field_name}'] must be a Field instance, "
f"got {type(field).__name__}"
)
meta_class_attrs = {**meta_options, "model": model}
Meta = type("Meta", (object,), meta_class_attrs)
resource_class_name = model.__name__ + "Resource"
resource_class_attrs = {
"Meta": Meta,
}
resource_class_attrs.update(custom_fields)
for field_name, method in dehydrate_methods.items():
if not callable(method):
raise ValueError(
f"dehydrate_methods['{field_name}'] must be callable, "
f"got {type(method).__name__}"
)
method_name = f"dehydrate_{field_name}"
resource_class_attrs[method_name] = _create_dehydrate_func_wrapper(method)
metaclass = ModelDeclarativeMetaclass
return metaclass(resource_class_name, (resource_class,), resource_class_attrs)
| ModelResource |
python | crytic__slither | slither/detectors/statements/costly_operations_in_loop.py | {
"start": 1862,
"end": 4096
} | class ____(AbstractDetector):
ARGUMENT = "costly-loop"
HELP = "Costly operations in a loop"
IMPACT = DetectorClassification.INFORMATIONAL
# Overall the detector seems precise, but it does not take into account
# case where there are external calls or internal calls that might read the state
# variable changes. In these cases the optimization should not be applied
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#costly-operations-inside-a-loop"
WIKI_TITLE = "Costly operations inside a loop"
WIKI_DESCRIPTION = (
"Costly operations inside a loop might waste gas, so optimizations are justified."
)
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract CostlyOperationsInLoop{
uint loop_count = 100;
uint state_variable=0;
function bad() external{
for (uint i=0; i < loop_count; i++){
state_variable++;
}
}
function good() external{
uint local_variable = state_variable;
for (uint i=0; i < loop_count; i++){
local_variable++;
}
state_variable = local_variable;
}
}
```
Incrementing `state_variable` in a loop incurs a lot of gas because of expensive `SSTOREs`, which might lead to an `out-of-gas`."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Use a local variable to hold the loop computation result."
def _detect(self) -> List[Output]:
""""""
results: List[Output] = []
for c in self.compilation_unit.contracts_derived:
values = detect_costly_operations_in_loop(c)
for node, calls_stack in values:
func = node.function
info: DETECTOR_INFO = [func, " has costly operations inside a loop:\n"]
info += ["\t- ", node, "\n"]
if len(calls_stack) > 0:
info.append("\tCalls stack containing the loop:\n")
for call in calls_stack:
info.extend(["\t\t", call, "\n"])
res = self.generate_result(info)
results.append(res)
return results
| CostlyOperationsInLoop |
python | ray-project__ray | python/ray/serve/llm/request_router.py | {
"start": 226,
"end": 1797
} | class ____(_PrefixCacheAffinityRouter):
"""A request router that is aware of the KV cache.
This router optimizes request routing by considering KV cache locality,
directing requests with similar prefixes to the same replica to improve
cache hit rates.
The internal policy is this (it may change in the future):
1. Mixes between three strategies to balance prefix cache hit rate and load
balancing:
- When load is balanced (queue length difference < threshold), it
selects replicas with the highest prefix match rate for the input text
- When load is balanced but match rate is below 10%, it falls back to
the smallest tenants (i.e. the replica with the least kv cache)
- When load is imbalanced, it uses the default Power of Two selection
2. Maintains a prefix tree to track which replicas have processed similar
inputs:
- Inserts prompt text into the prefix tree after routing
- Uses this history to inform future routing decisions
Parameters:
imbalanced_threshold: The threshold for considering the load imbalanced.
match_rate_threshold: The threshold for considering the match rate.
do_eviction: Whether to do eviction.
eviction_threshold_chars: Number of characters in the tree to trigger
eviction.
eviction_target_chars: Number of characters in the tree to target for
eviction.
eviction_interval_secs: How often (in seconds) to run the eviction
policy.
"""
pass
| PrefixCacheAffinityRouter |
python | scikit-learn__scikit-learn | sklearn/externals/_packaging/_structures.py | {
"start": 1475,
"end": 2196
} | class ____:
def __repr__(self) -> str:
return "Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return False
def __le__(self, other: object) -> bool:
return False
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __ne__(self, other: object) -> bool:
return not isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return True
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
Infinity = InfinityType()
| InfinityType |
python | mahmoud__glom | glom/reduction.py | {
"start": 4795,
"end": 8567
} | class ____(Fold):
"""The `Flatten` specifier type is used to combine iterables. By
default it flattens an iterable of iterables into a single list
containing items from all iterables.
>>> target = [[1], [2, 3]]
>>> glom(target, Flatten())
[1, 2, 3]
You can also set *init* to ``"lazy"``, which returns a generator
instead of a list. Use this to avoid making extra lists and other
collections during intermediate processing steps.
"""
def __init__(self, subspec=T, init=list):
if init == 'lazy':
self.lazy = True
init = list
else:
self.lazy = False
super().__init__(subspec=subspec, init=init, op=operator.iadd)
def _fold(self, iterator):
if self.lazy:
return itertools.chain.from_iterable(iterator)
return super()._fold(iterator)
def __repr__(self):
cn = self.__class__.__name__
args = () if self.subspec is T else (self.subspec,)
kwargs = {}
if self.lazy:
kwargs['init'] = 'lazy'
elif self.init is not list:
kwargs['init'] = self.init
return format_invocation(cn, args, kwargs, repr=bbrepr)
def flatten(target, **kwargs):
"""At its most basic, ``flatten()`` turns an iterable of iterables
into a single list. But it has a few arguments which give it more
power:
Args:
init (callable): A function or type which gives the initial
value of the return. The value must support addition. Common
values might be :class:`list` (the default), :class:`tuple`,
or even :class:`int`. You can also pass ``init="lazy"`` to
get a generator.
levels (int): A positive integer representing the number of
nested levels to flatten. Defaults to 1.
spec: The glomspec to fetch before flattening. This defaults to the
the root level of the object.
Usage is straightforward.
>>> target = [[1, 2], [3], [4]]
>>> flatten(target)
[1, 2, 3, 4]
Because integers themselves support addition, we actually have two
levels of flattening possible, to get back a single integer sum:
>>> flatten(target, init=int, levels=2)
10
However, flattening a non-iterable like an integer will raise an
exception:
>>> target = 10
>>> flatten(target)
Traceback (most recent call last):
...
FoldError: can only Flatten on iterable targets, not int type (...)
By default, ``flatten()`` will add a mix of iterables together,
making it a more-robust alternative to the built-in
``sum(list_of_lists, list())`` trick most experienced Python
programmers are familiar with using:
>>> list_of_iterables = [range(2), [2, 3], (4, 5)]
>>> sum(list_of_iterables, [])
Traceback (most recent call last):
...
TypeError: can only concatenate list (not "tuple") to list
Whereas flatten() handles this just fine:
>>> flatten(list_of_iterables)
[0, 1, 2, 3, 4, 5]
The ``flatten()`` function is a convenient wrapper around the
:class:`Flatten` specifier type. For embedding in larger specs,
and more involved flattening, see :class:`Flatten` and its base,
:class:`Fold`.
"""
subspec = kwargs.pop('spec', T)
init = kwargs.pop('init', list)
levels = kwargs.pop('levels', 1)
if kwargs:
raise TypeError('unexpected keyword args: %r' % sorted(kwargs.keys()))
if levels == 0:
return target
if levels < 0:
raise ValueError('expected levels >= 0, not %r' % levels)
spec = (subspec,)
spec += (Flatten(init="lazy"),) * (levels - 1)
spec += (Flatten(init=init),)
return glom(target, spec)
| Flatten |
python | great-expectations__great_expectations | great_expectations/core/expectation_validation_result.py | {
"start": 2239,
"end": 16686
} | class ____(SerializableDictDot):
"""An Expectation validation result.
Args:
success: Whether the Expectation validation was successful.
expectation_config: The configuration of the Expectation that was validated.
result: The result details that can take one of many result formats.
meta: Metadata associated with the validation result.
exception_info: Any exception information that was raised during validation. Takes the form:
raised_exception: boolean
exception_traceback: Optional, str
exception_message: Optional, str
rendered_content: Inline content for rendering.
Raises:
InvalidCacheValueError: Raised if the result does not pass validation.
"""
def __init__( # noqa: PLR0913 # FIXME CoP
self,
success: Optional[bool] = None,
expectation_config: Optional[ExpectationConfiguration] = None,
result: Optional[dict] = None,
meta: Optional[dict] = None,
exception_info: Optional[dict] = None,
rendered_content: Union[RenderedAtomicContent, List[RenderedAtomicContent], None] = None,
**kwargs: dict,
) -> None:
if result and not self.validate_result_dict(result):
raise gx_exceptions.InvalidCacheValueError(result)
self.success = success
self.expectation_config = expectation_config
# TODO: re-add
# assert_json_serializable(result, "result")
if result is None:
result = {}
self.result = result
if meta is None:
meta = {}
# We require meta information to be serializable, but do not convert until necessary
ensure_json_serializable(meta)
self.meta = meta
self.exception_info = exception_info or {
"raised_exception": False,
"exception_traceback": None,
"exception_message": None,
}
self.rendered_content = rendered_content
@override
def __eq__(self, other):
"""ExpectationValidationResult equality ignores instance identity, relying only on properties.""" # noqa: E501 # FIXME CoP
# NOTE: JPC - 20200213 - need to spend some time thinking about whether we want to
# consistently allow dict as a comparison alternative in situations like these...
# if isinstance(other, dict):
# try:
# other = ExpectationValidationResult(**other)
# except ValueError:
# return NotImplemented
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return other == self
try:
if self.result and other.result:
common_keys = set(self.result.keys()) & other.result.keys()
result_dict = self.to_json_dict()["result"]
other_result_dict = other.to_json_dict()["result"]
contents_equal = all(result_dict[k] == other_result_dict[k] for k in common_keys)
else:
contents_equal = False
return all(
(
self.success == other.success,
(self.expectation_config is None and other.expectation_config is None)
or (
self.expectation_config is not None
and self.expectation_config.isEquivalentTo(
other=other.expectation_config, match_type="success"
)
),
# Result is a dictionary allowed to have nested dictionaries that are still of complex types (e.g. # noqa: E501 # FIXME CoP
# numpy) consequently, series' comparison can persist. Wrapping in all() ensures comparison is # noqa: E501 # FIXME CoP
# handled appropriately.
not (self.result or other.result) or contents_equal,
self.meta == other.meta,
self.exception_info == other.exception_info,
)
)
except (ValueError, TypeError):
# if invalid comparisons are attempted, the objects are not equal.
return False
@override
def __hash__(self) -> int:
"""Overrides the default implementation"""
# note that it is possible for two results to be equal but have different hashes
# this is because during comparison we only compare common keys
if self.result:
result_hash = hash(tuple(sorted(self.result.items())))
else:
result_hash = hash(None)
# Handle expectation_config hash
if self.expectation_config:
config_hash = hash(self.expectation_config)
else:
config_hash = hash(None)
return hash(
(
self.success,
config_hash,
result_hash,
tuple(sorted(self.meta.items())) if self.meta else (),
tuple(sorted(self.exception_info.items())) if self.exception_info else (),
)
)
def __ne__(self, other): # type: ignore[explicit-override] # FIXME
# Negated implementation of '__eq__'. TODO the method should be deleted when it will coincide with __eq__. # noqa: E501 # FIXME CoP
# return not self == other
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __ne__.
return NotImplemented
try:
return any(
(
self.success != other.success,
(self.expectation_config is None and other.expectation_config is not None)
or (
self.expectation_config is not None
and not self.expectation_config.isEquivalentTo(other.expectation_config)
),
# TODO should it be wrapped in all()/any()? Since it is the only difference to __eq__: # noqa: E501 # FIXME CoP
(self.result is None and other.result is not None)
or (self.result != other.result),
self.meta != other.meta,
self.exception_info != other.exception_info,
)
)
except (ValueError, TypeError):
# if invalid comparisons are attempted, the objects are not equal.
return True
@override
def __repr__(self) -> str:
"""
# TODO: <Alex>5/9/2022</Alex>
This implementation is non-ideal (it was agreed to employ it for development expediency). A better approach
would consist of "__str__()" calling "__repr__()", while all output options are handled through state variables.
""" # noqa: E501 # FIXME CoP
json_dict: dict = self.to_json_dict()
return json.dumps(json_dict, indent=2)
@override
def __str__(self) -> str:
"""
# TODO: <Alex>5/9/2022</Alex>
This implementation is non-ideal (it was agreed to employ it for development expediency). A better approach
would consist of "__str__()" calling "__repr__()", while all output options are handled through state variables.
""" # noqa: E501 # FIXME CoP
return json.dumps(self.to_json_dict(), indent=2)
def render(self) -> None:
"""Renders content using the:
- atomic prescriptive renderer for the expectation configuration associated with this
ExpectationValidationResult to self.expectation_config.rendered_content
- atomic diagnostic renderer for the expectation configuration associated with this
ExpectationValidationResult to self.rendered_content.
"""
inline_renderer_config: InlineRendererConfig = {
"class_name": "InlineRenderer",
"render_object": self,
}
module_name = "great_expectations.render.renderer.inline_renderer"
inline_renderer = instantiate_class_from_config(
config=inline_renderer_config,
runtime_environment={},
config_defaults={"module_name": module_name},
)
if not inline_renderer:
raise ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=inline_renderer_config["class_name"],
)
rendered_content: List[RenderedAtomicContent] = inline_renderer.get_rendered_content()
self.rendered_content = [
content_block
for content_block in rendered_content
if content_block.name.startswith(AtomicRendererType.DIAGNOSTIC)
]
if self.expectation_config:
self.expectation_config.rendered_content = [
content_block
for content_block in rendered_content
if content_block.name.startswith(AtomicRendererType.PRESCRIPTIVE)
]
@staticmethod
def validate_result_dict(result):
if result.get("unexpected_count") and result["unexpected_count"] < 0:
return False
if result.get("unexpected_percent") and (
result["unexpected_percent"] < 0 or result["unexpected_percent"] > 100 # noqa: PLR2004 # FIXME CoP
):
return False
if result.get("missing_percent") and (
result["missing_percent"] < 0 or result["missing_percent"] > 100 # noqa: PLR2004 # FIXME CoP
):
return False
if result.get("unexpected_percent_nonmissing") and (
result["unexpected_percent_nonmissing"] < 0
or result["unexpected_percent_nonmissing"] > 100 # noqa: PLR2004 # FIXME CoP
):
return False
return not (result.get("missing_count") and result["missing_count"] < 0)
@public_api
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this ExpectationValidationResult.
Returns:
A JSON-serializable dict representation of this ExpectationValidationResult.
"""
myself = expectationValidationResultSchema.dump(self)
# NOTE - JPC - 20191031: migrate to expectation-specific schemas that subclass result with properly-typed # noqa: E501 # FIXME CoP
# schemas to get serialization all-the-way down via dump
if "expectation_config" in myself:
myself["expectation_config"] = convert_to_json_serializable(
myself["expectation_config"]
)
if "result" in myself:
myself["result"] = convert_to_json_serializable(myself["result"])
if "meta" in myself:
myself["meta"] = convert_to_json_serializable(myself["meta"])
if "exception_info" in myself:
myself["exception_info"] = convert_to_json_serializable(myself["exception_info"])
if "rendered_content" in myself:
myself["rendered_content"] = convert_to_json_serializable(myself["rendered_content"])
return myself
def get_metric(self, metric_name, **kwargs): # noqa: C901 # too complex
if not self.expectation_config:
raise gx_exceptions.UnavailableMetricError( # noqa: TRY003 # FIXME CoP
"No ExpectationConfig found in this ExpectationValidationResult. Unable to "
"return a metric."
)
metric_name_parts = metric_name.split(".")
metric_kwargs_id = get_metric_kwargs_id(metric_kwargs=kwargs)
if metric_name_parts[0] == self.expectation_config.type:
curr_metric_kwargs = get_metric_kwargs_id(metric_kwargs=self.expectation_config.kwargs)
if metric_kwargs_id != curr_metric_kwargs:
raise gx_exceptions.UnavailableMetricError(
"Requested metric_kwargs_id ({}) does not match the configuration of this "
"ExpectationValidationResult ({}).".format(
metric_kwargs_id or "None", curr_metric_kwargs or "None"
)
)
if len(metric_name_parts) < 2: # noqa: PLR2004 # FIXME CoP
raise gx_exceptions.UnavailableMetricError( # noqa: TRY003 # FIXME CoP
"Expectation-defined metrics must include a requested metric."
)
elif len(metric_name_parts) == 2: # noqa: PLR2004 # FIXME CoP
if metric_name_parts[1] == "success":
return self.success
else:
raise gx_exceptions.UnavailableMetricError( # noqa: TRY003 # FIXME CoP
"Metric name must have more than two parts for keys other than success."
)
elif metric_name_parts[1] == "result":
try:
if len(metric_name_parts) == 3: # noqa: PLR2004 # FIXME CoP
return self.result.get(metric_name_parts[2])
elif metric_name_parts[2] == "details":
return self.result["details"].get(metric_name_parts[3])
except KeyError:
raise gx_exceptions.UnavailableMetricError( # noqa: TRY003 # FIXME CoP
f"Unable to get metric {metric_name} -- KeyError in "
"ExpectationValidationResult."
)
raise gx_exceptions.UnavailableMetricError(f"Unrecognized metric name {metric_name}") # noqa: TRY003 # FIXME CoP
def describe_dict(self) -> dict:
if self.expectation_config:
expectation_type = self.expectation_config.type
kwargs = self.expectation_config.kwargs
else:
expectation_type = None
kwargs = None
describe_dict = {
"expectation_type": expectation_type,
"success": self.success,
"kwargs": kwargs,
"result": self.result,
}
if self.exception_info.get("raised_exception"):
describe_dict["exception_info"] = self.exception_info
return convert_to_json_serializable(describe_dict)
@public_api
def describe(self) -> str:
"""JSON string description of this ExpectationValidationResult"""
return json.dumps(self.describe_dict(), indent=4)
| ExpectationValidationResult |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image26.py | {
"start": 315,
"end": 1060
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image26.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("B2", self.image_dir + "black_72.png")
worksheet.insert_image("B8", self.image_dir + "black_96.png")
worksheet.insert_image("B13", self.image_dir + "black_150.png")
worksheet.insert_image("B17", self.image_dir + "black_300.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pennersr__django-allauth | allauth/socialaccount/providers/coinbase/provider.py | {
"start": 315,
"end": 865
} | class ____(OAuth2Provider):
id = "coinbase"
name = "Coinbase"
account_class = CoinbaseAccount
oauth2_adapter_class = CoinbaseOAuth2Adapter
def get_default_scope(self):
# See: https://coinbase.com/docs/api/permissions
return ["wallet:user:email"]
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
# See: https://coinbase.com/api/doc/1.0/users/index.html
return dict(email=data["email"])
provider_classes = [CoinbaseProvider]
| CoinbaseProvider |
python | spyder-ide__spyder | spyder/utils/installers.py | {
"start": 977,
"end": 1229
} | class ____(SpyderInstallerError):
"""Error for missing dependencies"""
def _msg(self, msg):
msg = msg.replace('<br>', '\n')
msg = 'Missing dependencies' + textwrap.indent(msg, ' ')
return msg
| InstallerMissingDependencies |
python | huggingface__transformers | tests/models/fsmt/test_modeling_fsmt.py | {
"start": 5094,
"end": 11652
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": FSMTModel,
"summarization": FSMTForConditionalGeneration,
"text2text-generation": FSMTForConditionalGeneration,
"translation": FSMTForConditionalGeneration,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
def setUp(self):
self.model_tester = FSMTModelTester(self)
self.langs = ["en", "ru"]
config = {
"langs": self.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
# XXX: hack to appease to all other models requiring `vocab_size`
config["vocab_size"] = 99 # no such thing in FSMT
self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config)
def test_config(self):
self.config_tester.run_common_tests()
# XXX: override test_model_get_set_embeddings / different Embedding type
def test_model_get_set_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding))
model.set_input_embeddings(nn.Embedding(10, 10))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.modules.sparse.Embedding))
def test_initialization_more(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
model = FSMTModel(config)
model.to(torch_device)
model.eval()
# test init
# self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item())
def _check_var(module):
"""Check that we initialized various parameters from N(0, config.init_std)."""
self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2)
_check_var(model.encoder.embed_tokens)
_check_var(model.encoder.layers[0].self_attn.k_proj)
_check_var(model.encoder.layers[0].fc1)
# XXX: different std for fairseq version of SinusoidalPositionalEmbedding
# self.assertAlmostEqual(torch.std(model.encoder.embed_positions.weights).item(), config.init_std, 2)
def test_advanced_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.use_cache = False
inputs_dict["input_ids"][:, -2:] = config.pad_token_id
decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs(
config, inputs_dict["input_ids"]
)
model = FSMTModel(config).to(torch_device).eval()
decoder_features_with_created_mask = model(**inputs_dict)[0]
decoder_features_with_passed_mask = model(
decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict
)[0]
_assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask)
useless_mask = torch.zeros_like(decoder_attn_mask)
decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0]
self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions
self.assertEqual(
decoder_features.size(),
(self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size),
)
if decoder_attn_mask.min().item() < -1e3: # some tokens were masked
self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item())
# Test different encoder attention masks
decoder_features_with_long_encoder_mask = model(
inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long()
)[0]
_assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask)
def test_save_load_missing_keys(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_ensure_weights_are_shared(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.tie_word_embeddings = True
model = FSMTForConditionalGeneration(config)
# FSMT shares three weights.
# Not an issue to not have these correctly tied for torch.load, but it is an issue for safetensors.
self.assertEqual(
len(
{
model.get_output_embeddings().weight.data_ptr(),
model.get_input_embeddings().weight.data_ptr(),
model.base_model.decoder.output_projection.weight.data_ptr(),
}
),
1,
)
config.tie_word_embeddings = False
model = FSMTForConditionalGeneration(config)
# FSMT shares three weights.
# Not an issue to not have these correctly tied for torch.load, but it is an issue for safetensors.
self.assertEqual(
len(
{
model.get_output_embeddings().weight.data_ptr(),
model.get_input_embeddings().weight.data_ptr(),
model.base_model.decoder.output_projection.weight.data_ptr(),
}
),
3,
)
@unittest.skip(reason="can't be implemented for FSMT due to dual vocab.")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Passing inputs_embeds not implemented for FSMT.")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Input ids is required for FSMT.")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="TODO: Decoder embeddings cannot be resized at the moment")
def test_resize_embeddings_untied(self):
pass
@require_torch
| FSMTModelTest |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 141610,
"end": 143429
} | class ____(BaseType):
# name string
# cname string
# type PyrexType
# pos source file position
# FIXME: is this the right setup? should None be allowed here?
not_none = False
or_none = False
accept_none = True
accept_builtin_subtypes = False
annotation = None
subtypes = ['type']
def __init__(self, name, type, pos=None, cname=None, annotation=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
if not self.cname.isascii():
# We have to be careful here not to create a circular import of Symtab.
# This creates a cname to match a Symtab entry that'll be created later
# - in an ideal world the name here would be taken from the entry...
from .Symtab import punycodify_name
self.cname = punycodify_name(self.cname)
if annotation is not None:
self.annotation = annotation
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
def __repr__(self):
return "%s:%s" % (self.name, repr(self.type))
def declaration_code(self, for_display = 0):
return self.type.declaration_code(self.cname, for_display)
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
def is_forwarding_reference(self):
if self.type.is_rvalue_reference:
if (isinstance(self.type.ref_base_type, TemplatePlaceholderType)
and not self.type.ref_base_type.is_cv_qualified):
return True
return False
| CFuncTypeArg |
python | getsentry__sentry | src/sentry/seer/models.py | {
"start": 1337,
"end": 1462
} | class ____(SpanInsight):
trace_id: str
suggestions: list[str]
reference_url: str | None = None
| PageWebVitalsInsight |
python | getsentry__sentry | src/sentry/issues/ownership/grammar.py | {
"start": 7560,
"end": 8017
} | class ____(NamedTuple):
"""
An Owner represents a User or Team who owns this Rule.
type is either `user` or `team`.
Examples:
foo@example.com
#team
"""
type: str
identifier: str
def dump(self) -> dict[str, str]:
return {"type": self.type, "identifier": self.identifier}
@classmethod
def load(cls, data: Mapping[str, str]) -> Owner:
return cls(data["type"], data["identifier"])
| Owner |
python | tiangolo__fastapi | tests/test_jsonable_encoder.py | {
"start": 1369,
"end": 1438
} | class ____(BaseModel):
foo: str = Field(alias="Foo")
| ModelWithAlias |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks/types.py | {
"start": 1211,
"end": 2463
} | class ____(NamedTuple):
"""Represents the state of a Databricks job run."""
life_cycle_state: Optional["DatabricksRunLifeCycleState"]
result_state: Optional["DatabricksRunResultState"]
state_message: Optional[str]
def has_terminated(self) -> bool:
"""Has the job terminated?"""
return self.life_cycle_state is not None and self.life_cycle_state.has_terminated()
def is_skipped(self) -> bool:
return self.life_cycle_state is not None and self.life_cycle_state.is_skipped()
def is_successful(self) -> bool:
"""Was the job successful?"""
return self.result_state is not None and self.result_state.is_successful()
@classmethod
def from_databricks(cls, run_state: jobs.RunState) -> "DatabricksRunState":
return cls(
life_cycle_state=(
DatabricksRunLifeCycleState(run_state.life_cycle_state.value)
if run_state.life_cycle_state
else None
),
result_state=(
DatabricksRunResultState(run_state.result_state.value)
if run_state.result_state
else None
),
state_message=run_state.state_message,
)
| DatabricksRunState |
python | ray-project__ray | python/ray/serve/tests/unit/test_config.py | {
"start": 1239,
"end": 4428
} | class ____:
...
def test_autoscaling_config_validation():
# Check validation over publicly exposed options
with pytest.raises(ValidationError):
# min_replicas must be nonnegative
AutoscalingConfig(min_replicas=-1)
with pytest.raises(ValidationError):
# max_replicas must be positive
AutoscalingConfig(max_replicas=0)
# target_ongoing_requests must be nonnegative
with pytest.raises(ValidationError):
AutoscalingConfig(target_ongoing_requests=-1)
# max_replicas must be greater than or equal to min_replicas
with pytest.raises(ValueError):
AutoscalingConfig(min_replicas=100, max_replicas=1)
AutoscalingConfig(min_replicas=1, max_replicas=100)
AutoscalingConfig(min_replicas=10, max_replicas=10)
# initial_replicas must be greater than or equal to min_replicas
with pytest.raises(ValueError):
AutoscalingConfig(min_replicas=10, initial_replicas=1)
with pytest.raises(ValueError):
AutoscalingConfig(min_replicas=10, initial_replicas=1, max_replicas=15)
AutoscalingConfig(min_replicas=5, initial_replicas=10, max_replicas=15)
AutoscalingConfig(min_replicas=5, initial_replicas=5, max_replicas=15)
# initial_replicas must be less than or equal to max_replicas
with pytest.raises(ValueError):
AutoscalingConfig(initial_replicas=10, max_replicas=8)
with pytest.raises(ValueError):
AutoscalingConfig(min_replicas=1, initial_replicas=10, max_replicas=8)
AutoscalingConfig(min_replicas=1, initial_replicas=4, max_replicas=5)
AutoscalingConfig(min_replicas=1, initial_replicas=5, max_replicas=5)
# Default values should not raise an error
default_autoscaling_config = AutoscalingConfig()
assert default_autoscaling_config.policy.is_default_policy_function() is True
non_default_autoscaling_config = AutoscalingConfig(
policy={"policy_function": "ray.serve.tests.unit.test_config:fake_policy"}
)
assert non_default_autoscaling_config.policy.is_default_policy_function() is False
def test_autoscaling_config_metrics_interval_s_deprecation_warning() -> None:
"""Test that the metrics_interval_s deprecation warning is raised."""
# Warning is raised if we set metrics_interval_s to a non-default value
with pytest.warns(DeprecationWarning):
AutoscalingConfig(metrics_interval_s=5)
# ... even if the AutoscalingConfig is instantiated implicitly via the @serve.deployment decorator
with pytest.warns(DeprecationWarning):
@serve.deployment(autoscaling_config={"metrics_interval_s": 5})
class Foo:
...
# ... or if it is deserialized from proto as part of a DeploymentConfig (presumably in the Serve Controller)
deployment_config_proto_bytes = DeploymentConfig(
autoscaling_config=AutoscalingConfig(metrics_interval_s=5)
).to_proto_bytes()
with pytest.warns(DeprecationWarning):
DeploymentConfig.from_proto_bytes(deployment_config_proto_bytes)
# Default settings should not raise a warning
with warnings.catch_warnings():
warnings.simplefilter("error")
AutoscalingConfig()
| FakeRequestRouter |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project.py | {
"start": 879,
"end": 1381
} | class ____:
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username="eric", password="test")
self.pip = Project.objects.get(slug="pip")
# Create a External Version. ie: pull/merge request Version.
self.external_version = get(
Version,
identifier="pr-version",
verbose_name="99",
slug="99",
project=self.pip,
active=True,
type=EXTERNAL,
)
| ProjectMixin |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 121182,
"end": 122093
} | class ____(DateTime):
"""The SQL TIMESTAMP type.
:class:`_types.TIMESTAMP` datatypes have support for timezone storage on
some backends, such as PostgreSQL and Oracle Database. Use the
:paramref:`~types.TIMESTAMP.timezone` argument in order to enable
"TIMESTAMP WITH TIMEZONE" for these backends.
"""
__visit_name__ = "TIMESTAMP"
def __init__(self, timezone: bool = False):
"""Construct a new :class:`_types.TIMESTAMP`.
:param timezone: boolean. Indicates that the TIMESTAMP type should
enable timezone support, if available on the target database.
On a per-dialect basis is similar to "TIMESTAMP WITH TIMEZONE".
If the target database does not support timezones, this flag is
ignored.
"""
super().__init__(timezone=timezone)
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
| TIMESTAMP |
python | getsentry__sentry | src/sentry/api/serializers/models/broadcast.py | {
"start": 1069,
"end": 1866
} | class ____(BroadcastSerializer):
def get_attrs(self, item_list, user, **kwargs):
attrs = super().get_attrs(item_list, user)
counts = dict(
BroadcastSeen.objects.filter(broadcast__in=item_list)
.values("broadcast")
.distinct()
.annotate(user_count=Count("broadcast"))
.values_list("broadcast", "user_count")
)
for item in attrs:
attrs[item]["user_count"] = counts.get(item.id, 0)
return attrs
def serialize(self, obj, attrs, user, **kwargs):
context = super().serialize(obj, attrs, user)
context["userCount"] = attrs["user_count"]
context["createdBy"] = obj.created_by_id.email if obj.created_by_id else None
return context
| AdminBroadcastSerializer |
python | optuna__optuna | optuna/storages/journal/_storage.py | {
"start": 16481,
"end": 27767
} | class ____:
def __init__(self, worker_id_prefix: str) -> None:
self.log_number_read = 0
self._worker_id_prefix = worker_id_prefix
self._studies: dict[int, FrozenStudy] = {}
self._trials: dict[int, FrozenTrial] = {}
self._study_id_to_trial_ids: dict[int, list[int]] = {}
self._trial_id_to_study_id: dict[int, int] = {}
self._next_study_id: int = 0
self._worker_id_to_owned_trial_id: dict[str, int] = {}
def apply_logs(self, logs: Iterable[dict[str, Any]]) -> None:
for log in logs:
self.log_number_read += 1
op = log["op_code"]
if op == JournalOperation.CREATE_STUDY:
self._apply_create_study(log)
elif op == JournalOperation.DELETE_STUDY:
self._apply_delete_study(log)
elif op == JournalOperation.SET_STUDY_USER_ATTR:
self._apply_set_study_user_attr(log)
elif op == JournalOperation.SET_STUDY_SYSTEM_ATTR:
self._apply_set_study_system_attr(log)
elif op == JournalOperation.CREATE_TRIAL:
self._apply_create_trial(log)
elif op == JournalOperation.SET_TRIAL_PARAM:
self._apply_set_trial_param(log)
elif op == JournalOperation.SET_TRIAL_STATE_VALUES:
self._apply_set_trial_state_values(log)
elif op == JournalOperation.SET_TRIAL_INTERMEDIATE_VALUE:
self._apply_set_trial_intermediate_value(log)
elif op == JournalOperation.SET_TRIAL_USER_ATTR:
self._apply_set_trial_user_attr(log)
elif op == JournalOperation.SET_TRIAL_SYSTEM_ATTR:
self._apply_set_trial_system_attr(log)
else:
assert False, "Should not reach."
def get_study(self, study_id: int) -> FrozenStudy:
if study_id not in self._studies:
raise KeyError(NOT_FOUND_MSG)
return self._studies[study_id]
def get_all_studies(self) -> list[FrozenStudy]:
return list(self._studies.values())
def get_trial(self, trial_id: int) -> FrozenTrial:
if trial_id not in self._trials:
raise KeyError(NOT_FOUND_MSG)
return self._trials[trial_id]
def get_all_trials(
self, study_id: int, states: Container[TrialState] | None
) -> list[FrozenTrial]:
if study_id not in self._studies:
raise KeyError(NOT_FOUND_MSG)
frozen_trials: list[FrozenTrial] = []
for trial_id in self._study_id_to_trial_ids[study_id]:
trial = self._trials[trial_id]
if states is None or trial.state in states:
frozen_trials.append(trial)
return frozen_trials
@property
def worker_id(self) -> str:
return self._worker_id_prefix + str(threading.get_ident())
@property
def owned_trial_id(self) -> int | None:
return self._worker_id_to_owned_trial_id.get(self.worker_id)
def _is_issued_by_this_worker(self, log: dict[str, Any]) -> bool:
return log["worker_id"] == self.worker_id
def _study_exists(self, study_id: int, log: dict[str, Any]) -> bool:
if study_id in self._studies:
return True
if self._is_issued_by_this_worker(log):
raise KeyError(NOT_FOUND_MSG)
return False
def _apply_create_study(self, log: dict[str, Any]) -> None:
study_name = log["study_name"]
directions = [StudyDirection(d) for d in log["directions"]]
if study_name in [s.study_name for s in self._studies.values()]:
if self._is_issued_by_this_worker(log):
raise DuplicatedStudyError(
"Another study with name '{}' already exists. "
"Please specify a different name, or reuse the existing one "
"by setting `load_if_exists` (for Python API) or "
"`--skip-if-exists` flag (for CLI).".format(study_name)
)
return
study_id = self._next_study_id
self._next_study_id += 1
self._studies[study_id] = FrozenStudy(
study_name=study_name,
direction=None,
user_attrs={},
system_attrs={},
study_id=study_id,
directions=directions,
)
self._study_id_to_trial_ids[study_id] = []
def _apply_delete_study(self, log: dict[str, Any]) -> None:
study_id = log["study_id"]
if self._study_exists(study_id, log):
fs = self._studies.pop(study_id)
assert fs._study_id == study_id
def _apply_set_study_user_attr(self, log: dict[str, Any]) -> None:
study_id = log["study_id"]
if self._study_exists(study_id, log):
assert len(log["user_attr"]) == 1
self._studies[study_id].user_attrs.update(log["user_attr"])
def _apply_set_study_system_attr(self, log: dict[str, Any]) -> None:
study_id = log["study_id"]
if self._study_exists(study_id, log):
assert len(log["system_attr"]) == 1
self._studies[study_id].system_attrs.update(log["system_attr"])
def _apply_create_trial(self, log: dict[str, Any]) -> None:
study_id = log["study_id"]
if not self._study_exists(study_id, log):
return
trial_id = len(self._trials)
distributions = {}
if "distributions" in log:
distributions = {k: json_to_distribution(v) for k, v in log["distributions"].items()}
params = {}
if "params" in log:
params = {k: distributions[k].to_external_repr(p) for k, p in log["params"].items()}
if log["datetime_start"] is not None:
datetime_start = datetime.datetime.fromisoformat(log["datetime_start"])
else:
datetime_start = None
if "datetime_complete" in log:
datetime_complete = datetime.datetime.fromisoformat(log["datetime_complete"])
else:
datetime_complete = None
self._trials[trial_id] = FrozenTrial(
trial_id=trial_id,
number=len(self._study_id_to_trial_ids[study_id]),
state=TrialState(log.get("state", TrialState.RUNNING.value)),
params=params,
distributions=distributions,
user_attrs=log.get("user_attrs", {}),
system_attrs=log.get("system_attrs", {}),
value=log.get("value", None),
intermediate_values={int(k): v for k, v in log.get("intermediate_values", {}).items()},
datetime_start=datetime_start,
datetime_complete=datetime_complete,
values=log.get("values", None),
)
self._study_id_to_trial_ids[study_id].append(trial_id)
self._trial_id_to_study_id[trial_id] = study_id
if self._is_issued_by_this_worker(log):
self._last_created_trial_id_by_this_process = trial_id
if self._trials[trial_id].state == TrialState.RUNNING:
self._worker_id_to_owned_trial_id[self.worker_id] = trial_id
def _apply_set_trial_param(self, log: dict[str, Any]) -> None:
trial_id = log["trial_id"]
if not self._trial_exists_and_updatable(trial_id, log):
return
param_name = log["param_name"]
param_value_internal = log["param_value_internal"]
distribution = json_to_distribution(log["distribution"])
study_id = self._trial_id_to_study_id[trial_id]
for prev_trial_id in self._study_id_to_trial_ids[study_id]:
prev_trial = self._trials[prev_trial_id]
if param_name in prev_trial.params.keys():
try:
check_distribution_compatibility(
prev_trial.distributions[param_name], distribution
)
except Exception:
if self._is_issued_by_this_worker(log):
raise
return
break
trial = copy.copy(self._trials[trial_id])
trial.params = {
**copy.copy(trial.params),
param_name: distribution.to_external_repr(param_value_internal),
}
trial.distributions = {**copy.copy(trial.distributions), param_name: distribution}
self._trials[trial_id] = trial
def _apply_set_trial_state_values(self, log: dict[str, Any]) -> None:
trial_id = log["trial_id"]
if not self._trial_exists_and_updatable(trial_id, log):
return
state = TrialState(log["state"])
if state == self._trials[trial_id].state and state == TrialState.RUNNING:
# Reject the operation as the popped trial is already run by another process.
return
trial = copy.copy(self._trials[trial_id])
if state == TrialState.RUNNING:
trial.datetime_start = datetime.datetime.fromisoformat(log["datetime_start"])
if self._is_issued_by_this_worker(log):
self._worker_id_to_owned_trial_id[self.worker_id] = trial_id
if state.is_finished():
trial.datetime_complete = datetime.datetime.fromisoformat(log["datetime_complete"])
trial.state = state
if log["values"] is not None:
trial.values = log["values"]
self._trials[trial_id] = trial
def _apply_set_trial_intermediate_value(self, log: dict[str, Any]) -> None:
trial_id = log["trial_id"]
if self._trial_exists_and_updatable(trial_id, log):
trial = copy.copy(self._trials[trial_id])
trial.intermediate_values = {
**copy.copy(trial.intermediate_values),
log["step"]: log["intermediate_value"],
}
self._trials[trial_id] = trial
def _apply_set_trial_user_attr(self, log: dict[str, Any]) -> None:
trial_id = log["trial_id"]
if self._trial_exists_and_updatable(trial_id, log):
assert len(log["user_attr"]) == 1
trial = copy.copy(self._trials[trial_id])
trial.user_attrs = {**copy.copy(trial.user_attrs), **log["user_attr"]}
self._trials[trial_id] = trial
def _apply_set_trial_system_attr(self, log: dict[str, Any]) -> None:
trial_id = log["trial_id"]
if self._trial_exists_and_updatable(trial_id, log):
assert len(log["system_attr"]) == 1
trial = copy.copy(self._trials[trial_id])
trial.system_attrs = {
**copy.copy(trial.system_attrs),
**log["system_attr"],
}
self._trials[trial_id] = trial
def _trial_exists_and_updatable(self, trial_id: int, log: dict[str, Any]) -> bool:
if trial_id not in self._trials:
if self._is_issued_by_this_worker(log):
raise KeyError(NOT_FOUND_MSG)
return False
elif self._trials[trial_id].state.is_finished():
if self._is_issued_by_this_worker(log):
raise UpdateFinishedTrialError(
UNUPDATABLE_MSG.format(trial_number=self._trials[trial_id].number)
)
return False
else:
return True
| JournalStorageReplayResult |
python | pypa__hatch | tests/backend/utils/test_context.py | {
"start": 260,
"end": 576
} | class ____:
def test_directory_separator(self, isolation):
context = Context(isolation)
assert context.format("foo {/}") == f"foo {os.sep}"
def test_path_separator(self, isolation):
context = Context(isolation)
assert context.format("foo {;}") == f"foo {os.pathsep}"
| TestStatic |
python | scrapy__scrapy | scrapy/utils/reactor.py | {
"start": 1484,
"end": 8978
} | class ____(Generic[_T]):
"""Schedule a function to be called in the next reactor loop, but only if
it hasn't been already scheduled since the last time it ran.
"""
def __init__(self, func: Callable[_P, _T], *a: _P.args, **kw: _P.kwargs):
self._func: Callable[_P, _T] = func
self._a: tuple[Any, ...] = a
self._kw: dict[str, Any] = kw
self._call: CallLaterResult | None = None
self._deferreds: list[Deferred] = []
def schedule(self, delay: float = 0) -> None:
# circular import
from scrapy.utils.asyncio import call_later # noqa: PLC0415
if self._call is None:
self._call = call_later(delay, self)
def cancel(self) -> None:
if self._call:
self._call.cancel()
def __call__(self) -> _T:
# circular import
from scrapy.utils.asyncio import call_later # noqa: PLC0415
self._call = None
result = self._func(*self._a, **self._kw)
for d in self._deferreds:
call_later(0, d.callback, None)
self._deferreds = []
return result
async def wait(self):
# circular import
from scrapy.utils.defer import maybe_deferred_to_future # noqa: PLC0415
d = Deferred()
self._deferreds.append(d)
await maybe_deferred_to_future(d)
_asyncio_reactor_path = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
def set_asyncio_event_loop_policy() -> None:
"""The policy functions from asyncio often behave unexpectedly,
so we restrict their use to the absolutely essential case.
This should only be used to install the reactor.
"""
policy = asyncio.get_event_loop_policy()
if sys.platform == "win32" and not isinstance(
policy, asyncio.WindowsSelectorEventLoopPolicy
):
policy = asyncio.WindowsSelectorEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
def install_reactor(reactor_path: str, event_loop_path: str | None = None) -> None:
"""Installs the :mod:`~twisted.internet.reactor` with the specified
import path. Also installs the asyncio event loop with the specified import
path if the asyncio reactor is enabled"""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
set_asyncio_event_loop_policy()
with suppress(error.ReactorAlreadyInstalledError):
event_loop = set_asyncio_event_loop(event_loop_path)
asyncioreactor.install(eventloop=event_loop)
else:
*module, _ = reactor_path.split(".")
installer_path = [*module, "install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
def _get_asyncio_event_loop() -> AbstractEventLoop:
return set_asyncio_event_loop(None)
def set_asyncio_event_loop(event_loop_path: str | None) -> AbstractEventLoop:
"""Sets and returns the event loop with specified import path."""
if event_loop_path is not None:
event_loop_class: type[AbstractEventLoop] = load_object(event_loop_path)
event_loop = _get_asyncio_event_loop()
if not isinstance(event_loop, event_loop_class):
event_loop = event_loop_class()
asyncio.set_event_loop(event_loop)
else:
try:
with catch_warnings():
# In Python 3.10.9, 3.11.1, 3.12 and 3.13, a DeprecationWarning
# is emitted about the lack of a current event loop, because in
# Python 3.14 and later `get_event_loop` will raise a
# RuntimeError in that event. Because our code is already
# prepared for that future behavior, we ignore the deprecation
# warning.
filterwarnings(
"ignore",
message="There is no current event loop",
category=DeprecationWarning,
)
event_loop = asyncio.get_event_loop()
except RuntimeError:
# `get_event_loop` raises RuntimeError when called with no asyncio
# event loop yet installed in the following scenarios:
# - Previsibly on Python 3.14 and later.
# https://github.com/python/cpython/issues/100160#issuecomment-1345581902
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop
def verify_installed_reactor(reactor_path: str) -> None:
"""Raise :exc:`RuntimeError` if the installed
:mod:`~twisted.internet.reactor` does not match the specified import
path or if no reactor is installed."""
if not is_reactor_installed():
raise RuntimeError(
"verify_installed_reactor() called without an installed reactor."
)
from twisted.internet import reactor
expected_reactor_type = load_object(reactor_path)
reactor_type = type(reactor)
if not reactor_type == expected_reactor_type:
raise RuntimeError(
f"The installed reactor ({global_object_name(reactor_type)}) "
f"does not match the requested one ({reactor_path})"
)
def verify_installed_asyncio_event_loop(loop_path: str) -> None:
"""Raise :exc:`RuntimeError` if the even loop of the installed
:class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`
does not match the specified import path or if no reactor is installed."""
if not is_reactor_installed():
raise RuntimeError(
"verify_installed_asyncio_event_loop() called without an installed reactor."
)
from twisted.internet import reactor
loop_class = load_object(loop_path)
if isinstance(reactor._asyncioEventloop, loop_class):
return
installed = (
f"{reactor._asyncioEventloop.__class__.__module__}"
f".{reactor._asyncioEventloop.__class__.__qualname__}"
)
raise RuntimeError(
"Scrapy found an asyncio Twisted reactor already "
f"installed, and its event loop class ({installed}) does "
"not match the one specified in the ASYNCIO_EVENT_LOOP "
f"setting ({global_object_name(loop_class)})"
)
def is_reactor_installed() -> bool:
"""Check whether a :mod:`~twisted.internet.reactor` is installed."""
return "twisted.internet.reactor" in sys.modules
def is_asyncio_reactor_installed() -> bool:
"""Check whether the installed reactor is :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`.
Raise a :exc:`RuntimeError` if no reactor is installed.
In a future Scrapy version, when Scrapy supports running without a Twisted
reactor, this function won't be useful for checking if it's possible to use
asyncio features, so the code that that doesn't directly require a Twisted
reactor should use :func:`scrapy.utils.asyncio.is_asyncio_available`
instead of this function.
.. versionchanged:: 2.13
In earlier Scrapy versions this function silently installed the default
reactor if there was no reactor installed. Now it raises an exception to
prevent silent problems in this case.
"""
if not is_reactor_installed():
raise RuntimeError(
"is_asyncio_reactor_installed() called without an installed reactor."
)
from twisted.internet import reactor
return isinstance(reactor, asyncioreactor.AsyncioSelectorReactor)
| CallLaterOnce |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 49608,
"end": 49917
} | class ____(SomeLayout):
layout: SomeLayout
axes: Sequence[int]
def to_mgpu(self) -> mgpu.FragmentedLayout:
layout = self.layout.to_mgpu()
if not isinstance(layout, mgpu.TiledLayout):
raise ValueError("Only TiledLayout supports reductions.")
return layout.reduce(self.axes)
| ReducedLayout |
python | walkccc__LeetCode | solutions/592. Fraction Addition and Subtraction/592.py | {
"start": 0,
"end": 324
} | class ____:
def fractionAddition(self, expression: str) -> str:
ints = list(map(int, re.findall('[+-]?[0-9]+', expression)))
A = 0
B = 1
for a, b in zip(ints[::2], ints[1::2]):
A = A * b + a * B
B *= b
g = math.gcd(A, B)
A //= g
B //= g
return str(A) + '/' + str(B)
| Solution |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 12306,
"end": 12503
} | class ____(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = False
_buffer = NbufferT[0]
| TestReadValuesNestedSingle |
python | numpy__numpy | benchmarks/benchmarks/bench_ma.py | {
"start": 9209,
"end": 9962
} | class ____(Benchmark):
param_names = ["size"]
params = [["small", "large"]]
def setup(self, size):
# Set the proportion of masked values.
prop_mask = 0.2
# Set up a "small" array with 10 vars and 10 obs.
rng = np.random.default_rng()
data = rng.random((10, 10), dtype=np.float32)
self.small = np.ma.array(data, mask=(data <= prop_mask))
# Set up a "large" array with 100 vars and 100 obs.
data = rng.random((100, 100), dtype=np.float32)
self.large = np.ma.array(data, mask=(data <= prop_mask))
def time_corrcoef(self, size):
if size == "small":
np.ma.corrcoef(self.small)
if size == "large":
np.ma.corrcoef(self.large)
| Corrcoef |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 34073,
"end": 34641
} | class ____(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~spack.vendor.jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ("body",)
body: t.List[Node]
# make sure nobody creates custom nodes
def _failing_new(*args: t.Any, **kwargs: t.Any) -> "te.NoReturn":
raise TypeError("can't create custom node types")
NodeType.__new__ = staticmethod(_failing_new) # type: ignore
del _failing_new
| ScopedEvalContextModifier |
python | pyinstaller__pyinstaller | tests/functional/modules/pyi_testmod_dynamic.py | {
"start": 867,
"end": 1139
} | class ____(types.ModuleType):
__file__ = __file__
def __init__(self, name):
super().__init__(name)
self.foo = "A new value!"
# Replace module 'pyi_testmod_dynamic' by class DynamicModule.
sys.modules[__name__] = DynamicModule(__name__)
| DynamicModule |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/foundry.py | {
"start": 1629,
"end": 1862
} | class ____(BetaMessages):
@cached_property
@override
def batches(self) -> None: # type: ignore[override]
"""Batches endpoint is not supported for Anthropic Foundry client."""
return None
| BetaFoundryMessages |
python | zarr-developers__zarr-python | tests/test_api/test_asynchronous.py | {
"start": 750,
"end": 3573
} | class ____(WithShape):
chunklen: int
@pytest.mark.parametrize(
("observed", "expected"),
[
({}, (None, None)),
(WithShape(shape=(1, 2)), ((1, 2), None)),
(WithChunks(shape=(1, 2), chunks=(1, 2)), ((1, 2), (1, 2))),
(WithChunkLen(shape=(10, 10), chunklen=1), ((10, 10), (1, 10))),
],
)
def test_get_shape_chunks(
observed: object, expected: tuple[tuple[int, ...] | None, tuple[int, ...] | None]
) -> None:
"""
Test the _get_shape_chunks function
"""
assert _get_shape_chunks(observed) == expected
@pytest.mark.parametrize(
("observed", "expected"),
[
(np.arange(10, dtype=np.dtype("int64")), {"shape": (10,), "dtype": np.dtype("int64")}),
(WithChunks(shape=(1, 2), chunks=(1, 2)), {"chunks": (1, 2), "shape": (1, 2)}),
(
create_array(
{},
chunks=(10,),
shape=(100,),
dtype="f8",
compressors=None,
filters=None,
zarr_format=2,
)._async_array,
{
"chunks": (10,),
"shape": (100,),
"dtype": np.dtype("f8"),
"compressor": None,
"filters": None,
"order": "C",
},
),
],
)
def test_like_args(
observed: AsyncArray[ArrayV2Metadata]
| AsyncArray[ArrayV3Metadata]
| AnyArray
| npt.NDArray[Any],
expected: object,
) -> None:
"""
Test the like_args function
"""
assert _like_args(observed) == expected
async def test_open_no_array() -> None:
"""
Test that zarr.api.asynchronous.open attempts to open a group when no array is found, but shape was specified in kwargs.
This behavior makes no sense but we should still test it.
"""
store = {
"zarr.json": default_buffer_prototype().buffer.from_bytes(
json.dumps({"zarr_format": 3, "node_type": "group"}).encode("utf-8")
)
}
with pytest.raises(
TypeError, match=r"open_group\(\) got an unexpected keyword argument 'shape'"
):
await open(store=store, shape=(1,))
async def test_open_group_new_path(tmp_path: Path) -> None:
"""
Test that zarr.api.asynchronous.group properly handles a string representation of a local file
path that does not yet exist.
See https://github.com/zarr-developers/zarr-python/issues/3406
"""
# tmp_path exists, but tmp_path / "test.zarr" will not, which is important for this test
path = tmp_path / "test.zarr"
grp = await group(store=path, attributes={"a": 1})
assert isinstance(grp, AsyncGroup)
# Calling group on an existing store should just open that store
grp = await group(store=path)
assert grp.attrs == {"a": 1}
| WithChunkLen |
python | pandas-dev__pandas | pandas/tests/frame/test_nonunique_indexes.py | {
"start": 151,
"end": 11872
} | class ____:
def test_setattr_columns_vs_construct_with_columns(self):
# assignment
# GH 3687
arr = np.random.default_rng(2).standard_normal((3, 2))
idx = list(range(2))
df = DataFrame(arr, columns=["A", "A"])
df.columns = idx
expected = DataFrame(arr, columns=idx)
tm.assert_frame_equal(df, expected)
def test_setattr_columns_vs_construct_with_columns_datetimeindx(self):
idx = date_range("20130101", periods=4, freq="QE-NOV")
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
)
df.columns = idx
expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
tm.assert_frame_equal(df, expected)
def test_insert_with_duplicate_columns(self):
# insert
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["string"] = "bah"
expected = DataFrame(
[[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
columns=["foo", "bar", "foo", "hello", "string"],
)
tm.assert_frame_equal(df, expected)
with pytest.raises(ValueError, match="Length of value"):
df.insert(0, "AnotherColumn", range(len(df.index) - 1))
# insert same dtype
df["foo2"] = 3
expected = DataFrame(
[[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# set (non-dup)
df["foo2"] = 4
expected = DataFrame(
[[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
df["foo2"] = 3
# delete (non dup)
del df["bar"]
expected = DataFrame(
[[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
columns=["foo", "foo", "hello", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# try to delete again (its not consolidated)
del df["hello"]
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# insert
df.insert(2, "new_col", 5.0)
expected = DataFrame(
[[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],
columns=["foo", "foo", "new_col", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# insert a dup
with pytest.raises(ValueError, match="cannot insert"):
df.insert(2, "new_col", 4.0)
df.insert(2, "new_col", 4.0, allow_duplicates=True)
expected = DataFrame(
[
[1, 1, 4.0, 5.0, "bah", 3],
[1, 2, 4.0, 5.0, "bah", 3],
[2, 3, 4.0, 5.0, "bah", 3],
],
columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# delete (dup)
del df["foo"]
expected = DataFrame(
[[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],
columns=["new_col", "new_col", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
def test_dup_across_dtypes(self):
# dup across dtypes
df = DataFrame(
[[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["foo2"] = 7.0
expected = DataFrame(
[[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
tm.assert_frame_equal(df, expected)
result = df["foo"]
expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])
tm.assert_frame_equal(result, expected)
# multiple replacements
df["foo"] = "string"
expected = DataFrame(
[
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
tm.assert_frame_equal(df, expected)
del df["foo"]
expected = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]
)
tm.assert_frame_equal(df, expected)
def test_column_dups_indexes(self):
# check column dups with index equal and not equal to df's index
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "A"],
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
expected_ser = Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"]},
columns=["A", "B", "A"],
)
this_df["A"] = index
tm.assert_frame_equal(this_df, expected_df)
def test_changing_dtypes_with_duplicate_columns(self):
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=["that", "that"]
)
expected = DataFrame(1.0, index=range(5), columns=["that", "that"])
df["that"] = 1.0
tm.assert_frame_equal(df, expected)
df = DataFrame(
np.random.default_rng(2).random((5, 2)), columns=["that", "that"]
)
expected = DataFrame(1, index=range(5), columns=["that", "that"])
df["that"] = 1
tm.assert_frame_equal(df, expected)
def test_dup_columns_comparisons(self):
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])
# not-comparing like-labelled
msg = (
r"Can only compare identically-labeled \(both index and columns\) "
"DataFrame objects"
)
with pytest.raises(ValueError, match=msg):
df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame(
[[False, True], [True, False], [False, False], [True, False]],
columns=["A", "A"],
)
tm.assert_frame_equal(result, expected)
def test_mixed_column_selection(self):
# mixed column selection
# GH 5639
dfbool = DataFrame(
{
"one": Series([True, True, False], index=["a", "b", "c"]),
"two": Series([False, False, True, False], index=["a", "b", "c", "d"]),
"three": Series([False, True, True, True], index=["a", "b", "c", "d"]),
}
)
expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)
result = dfbool[["one", "three", "one"]]
tm.assert_frame_equal(result, expected)
def test_multi_axis_dups(self):
# multi-axis dups
# GH 6121
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]].copy()
expected = z.loc[["a", "c", "a"]]
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]]
result = z.loc[["a", "c", "a"]]
tm.assert_frame_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["a", "a.1"]
expected = DataFrame([[1, 2]], columns=["a", "a.1"])
tm.assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"])
df.columns = ["b", "a", "a.1"]
expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"])
tm.assert_frame_equal(df, expected)
def test_columns_with_dup_index(self):
# with a dup index
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["b", "b"]
expected = DataFrame([[1, 2]], columns=["b", "b"])
tm.assert_frame_equal(df, expected)
def test_multi_dtype(self):
# multi-dtype
df = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]],
columns=["a", "a", "b", "b", "d", "c", "c"],
)
df.columns = list("ABCDEFG")
expected = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_multi_dtype2(self):
df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"])
df.columns = ["a", "a.1", "a.2", "a.3"]
expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"])
tm.assert_frame_equal(df, expected)
def test_dups_across_blocks(self):
# dups across blocks
df_float = DataFrame(
np.random.default_rng(2).standard_normal((10, 3)), dtype="float64"
)
df_int = DataFrame(
np.random.default_rng(2).standard_normal((10, 3)).astype("int64")
)
df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns)
df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns)
df_dt = DataFrame(
pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns
)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._mgr.blknos) == len(df.columns)
assert len(df._mgr.blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
def test_dup_columns_across_dtype(self):
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.0], [2, -2, 3.0]]
rs = DataFrame(vals, columns=["A", "A", "B"])
xp = DataFrame(vals)
xp.columns = ["A", "A", "B"]
tm.assert_frame_equal(rs, xp)
def test_set_value_by_index(self):
# See gh-12344
warn = None
msg = "will attempt to set the values inplace"
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list("AAA")
expected = df.iloc[:, 2].copy()
with tm.assert_produces_warning(warn, match=msg):
df.iloc[:, 0] = 3
tm.assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1].copy()
with tm.assert_produces_warning(warn, match=msg):
df.iloc[:, 0] = 3
tm.assert_series_equal(df.iloc[:, 1], expected)
| TestDataFrameNonuniqueIndexes |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor17.py | {
"start": 238,
"end": 353
} | class ____(Generic[T]):
def __new__(cls, *args, **kwargs):
return super().__new__(cls, *args, **kwargs)
| A |
python | google__jax | tests/scipy_fft_test.py | {
"start": 1409,
"end": 5378
} | class ____(jtu.JaxTestCase):
"""Tests for LAX-backed scipy.fft implementations"""
@jtu.sample_product(
dtype=real_dtypes,
shape=[(10,), (2, 5)],
n=[None, 1, 7, 13, 20],
axis=[-1, 0],
norm=[None, 'ortho', 'backward'],
)
def testDct(self, shape, dtype, n, axis, norm):
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(shape, dtype),)
jnp_fn = lambda a: jsp_fft.dct(a, n=n, axis=axis, norm=norm)
np_fn = lambda a: osp_fft.dct(a, n=n, axis=axis, norm=norm)
self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(jnp_fn, args_maker, atol=1e-4)
@jtu.sample_product(
[dict(shape=shape, axes=axes, s=s)
for shape in [(10,), (10, 10), (9,), (2, 3, 4), (2, 3, 4, 5)]
for axes in _get_dctn_test_axes(shape)
for s in _get_dctn_test_s(shape, axes)],
dtype=real_dtypes,
norm=[None, 'ortho', 'backward'],
)
def testDctn(self, shape, dtype, s, axes, norm):
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(shape, dtype),)
jnp_fn = lambda a: jsp_fft.dctn(a, s=s, axes=axes, norm=norm)
np_fn = lambda a: osp_fft.dctn(a, s=s, axes=axes, norm=norm)
self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(jnp_fn, args_maker, atol=1e-4)
@jtu.sample_product(
dtype=real_dtypes,
shape=[(10,), (2, 5)],
n=[None, 1, 7, 13, 20],
axis=[-1, 0],
norm=[None, 'ortho', 'backward'],
)
# TODO(phawkins): these tests are failing on T4 GPUs in CI with a
# CUDA_ERROR_ILLEGAL_ADDRESS.
@jtu.skip_on_devices("cuda")
def testiDct(self, shape, dtype, n, axis, norm):
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(shape, dtype),)
jnp_fn = lambda a: jsp_fft.idct(a, n=n, axis=axis, norm=norm)
np_fn = lambda a: osp_fft.idct(a, n=n, axis=axis, norm=norm)
self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(jnp_fn, args_maker, atol=1e-4)
@jtu.sample_product(
[dict(shape=shape, axes=axes, s=s)
for shape in [(10,), (10, 10), (9,), (2, 3, 4), (2, 3, 4, 5)]
for axes in _get_dctn_test_axes(shape)
for s in _get_dctn_test_s(shape, axes)],
dtype=real_dtypes,
norm=[None, 'ortho', 'backward'],
)
# TODO(phawkins): these tests are failing on T4 GPUs in CI with a
# CUDA_ERROR_ILLEGAL_ADDRESS.
@jtu.skip_on_devices("cuda")
def testiDctn(self, shape, dtype, s, axes, norm):
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(shape, dtype),)
jnp_fn = lambda a: jsp_fft.idctn(a, s=s, axes=axes, norm=norm)
np_fn = lambda a: osp_fft.idctn(a, s=s, axes=axes, norm=norm)
self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(jnp_fn, args_maker, atol=1e-4)
def testIdctNormalizationPrecision(self):
# reported in https://github.com/jax-ml/jax/issues/23895
if not config.enable_x64.value:
raise self.skipTest("requires jax_enable_x64=true")
x = np.ones(3, dtype="float64")
n = 10
expected = osp_fft.idct(x, n=n, type=2)
actual = jsp_fft.idct(x, n=n, type=2)
self.assertArraysAllClose(actual, expected, atol=1e-14)
@jtu.sample_product(func=['idctn', 'dctn'])
def testDctnShape(self, func):
# Regression test for https://github.com/jax-ml/jax/issues/31836
x = np.arange(10.0).reshape(5, 2)
kwds = dict(type=2, s=(12, 7), axes=(-2, -1))
osp_func = getattr(osp_fft, func)
jsp_func = getattr(jsp_fft, func)
expected = osp_func(x, **kwds)
actual = jsp_func(x, **kwds)
rtol = {np.float64: 1E-12, np.float32: 1E-4}
self.assertArraysAllClose(actual, expected, rtol=rtol)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LaxBackedScipyFftTests |
python | chardet__chardet | chardet/chardistribution.py | {
"start": 8546,
"end": 9576
} | class ____(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # type: ignore[reportIncompatibleMethodOverride]
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if 0x81 <= first_char <= 0x9F:
order = 188 * (first_char - 0x81)
elif 0xE0 <= first_char <= 0xEF:
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
| SJISDistributionAnalysis |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/tests/test_frontend_widget.py | {
"start": 242,
"end": 3219
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Create the application for the test case.
"""
cls._app = QtWidgets.QApplication.instance()
if cls._app is None:
cls._app = QtWidgets.QApplication([])
cls._app.setQuitOnLastWindowClosed(False)
@classmethod
def tearDownClass(cls):
""" Exit the application.
"""
QtWidgets.QApplication.quit()
def test_transform_classic_prompt(self):
""" Test detecting classic prompts.
"""
w = FrontendWidget(kind='rich')
t = w._highlighter.transform_classic_prompt
# Base case
self.assertEqual(t('>>> test'), 'test')
self.assertEqual(t(' >>> test'), 'test')
self.assertEqual(t('\t >>> test'), 'test')
# No prompt
self.assertEqual(t(''), '')
self.assertEqual(t('test'), 'test')
# Continuation prompt
self.assertEqual(t('... test'), 'test')
self.assertEqual(t(' ... test'), 'test')
self.assertEqual(t(' ... test'), 'test')
self.assertEqual(t('\t ... test'), 'test')
# Prompts that don't match the 'traditional' prompt
self.assertEqual(t('>>>test'), '>>>test')
self.assertEqual(t('>> test'), '>> test')
self.assertEqual(t('...test'), '...test')
self.assertEqual(t('.. test'), '.. test')
# Prefix indicating input from other clients
self.assertEqual(t('[remote] >>> test'), 'test')
# Random other prefix
self.assertEqual(t('[foo] >>> test'), '[foo] >>> test')
def test_transform_ipy_prompt(self):
""" Test detecting IPython prompts.
"""
w = FrontendWidget(kind='rich')
t = w._highlighter.transform_ipy_prompt
# In prompt
self.assertEqual(t('In [1]: test'), 'test')
self.assertEqual(t('In [2]: test'), 'test')
self.assertEqual(t('In [10]: test'), 'test')
self.assertEqual(t(' In [1]: test'), 'test')
self.assertEqual(t('\t In [1]: test'), 'test')
# No prompt
self.assertEqual(t(''), '')
self.assertEqual(t('test'), 'test')
# Continuation prompt
self.assertEqual(t(' ...: test'), 'test')
self.assertEqual(t(' ...: test'), 'test')
self.assertEqual(t(' ...: test'), 'test')
self.assertEqual(t('\t ...: test'), 'test')
# Prompts that don't match the in-prompt
self.assertEqual(t('In [1]:test'), 'In [1]:test')
self.assertEqual(t('[1]: test'), '[1]: test')
self.assertEqual(t('In: test'), 'In: test')
self.assertEqual(t(': test'), ': test')
self.assertEqual(t('...: test'), '...: test')
# Prefix indicating input from other clients
self.assertEqual(t('[remote] In [1]: test'), 'test')
# Random other prefix
self.assertEqual(t('[foo] In [1]: test'), '[foo] In [1]: test')
| TestFrontendWidget |
python | realpython__materials | python-contact-book/source_code_step_4/rpcontacts/views.py | {
"start": 277,
"end": 1524
} | class ____(QMainWindow):
"""Main Window."""
def __init__(self, parent=None):
"""Initializer."""
super().__init__(parent)
self.setWindowTitle("RP Contacts")
self.resize(550, 250)
self.centralWidget = QWidget()
self.setCentralWidget(self.centralWidget)
self.layout = QHBoxLayout()
self.centralWidget.setLayout(self.layout)
self.contactsModel = ContactsModel()
self.setupUI()
def setupUI(self):
"""Setup the main window's GUI."""
# Create the table view widget
self.table = QTableView()
self.table.setModel(self.contactsModel.model)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.resizeColumnsToContents()
# Create buttons
self.addButton = QPushButton("Add...")
self.deleteButton = QPushButton("Delete")
self.clearAllButton = QPushButton("Clear All")
# Lay out the GUI
layout = QVBoxLayout()
layout.addWidget(self.addButton)
layout.addWidget(self.deleteButton)
layout.addStretch()
layout.addWidget(self.clearAllButton)
self.layout.addWidget(self.table)
self.layout.addLayout(layout)
| Window |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ScatterPlotItem.py | {
"start": 10646,
"end": 42440
} | class ____(GraphicsObject):
"""
Displays a set of x/y points. Instances of this class are created
automatically as part of PlotDataItem; these rarely need to be instantiated
directly.
The size, shape, pen, and fill brush may be set for each point individually
or for all points.
============================ ===============================================
**Signals:**
sigPlotChanged(self) Emitted when the data being plotted has changed
sigClicked(self, points, ev) Emitted when points are clicked. Sends a list
of all the points under the mouse pointer.
sigHovered(self, points, ev) Emitted when the item is hovered. Sends a list
of all the points under the mouse pointer.
============================ ===============================================
"""
#sigPointClicked = QtCore.Signal(object, object)
sigClicked = QtCore.Signal(object, object, object)
sigHovered = QtCore.Signal(object, object, object)
sigPlotChanged = QtCore.Signal(object)
def __init__(self, *args, **kargs):
"""
Accepts the same arguments as setData()
"""
profiler = debug.Profiler()
GraphicsObject.__init__(self)
self.picture = None # QPicture used for rendering when pxmode==False
self.fragmentAtlas = SymbolAtlas()
if screen := QtGui.QGuiApplication.primaryScreen():
self.fragmentAtlas.setDevicePixelRatio(screen.devicePixelRatio())
dtype = [
('x', float),
('y', float),
('size', float),
('symbol', object),
('pen', object),
('brush', object),
('visible', bool),
('data', object),
('hovered', bool),
('item', object),
('sourceRect', [
('x', int),
('y', int),
('w', int),
('h', int)
])
]
self.data = np.empty(0, dtype=dtype)
self.bounds = [None, None] ## caches data bounds
self._maxSpotWidth = 0 ## maximum size of the scale-variant portion of all spots
self._maxSpotPxWidth = 0 ## maximum size of the scale-invariant portion of all spots
self._pixmapFragments = Qt.internals.PrimitiveArray(QtGui.QPainter.PixmapFragment, 10)
self.opts = {
'pxMode': True,
'useCache': True, ## If useCache is False, symbols are re-drawn on every paint.
'antialias': getConfigOption('antialias'),
'compositionMode': None,
'name': None,
'symbol': 'o',
'size': 7,
'pen': fn.mkPen(getConfigOption('foreground')),
'brush': fn.mkBrush(100, 100, 150),
'hoverable': False,
'tip': 'x: {x:.3g}\ny: {y:.3g}\ndata={data}'.format,
}
self.opts.update(
{'hover' + opt.title(): _DEFAULT_STYLE[opt] for opt in ['symbol', 'size', 'pen', 'brush']}
)
profiler()
self.setData(*args, **kargs)
profiler('setData')
#self.setCacheMode(self.DeviceCoordinateCache)
# track when the tooltip is cleared so we only clear it once
# this allows another item in the VB to set the tooltip
self._toolTipCleared = True
def setData(self, *args, **kargs):
"""
**Ordered Arguments:**
* If there is only one unnamed argument, it will be interpreted like the 'spots' argument.
* If there are two unnamed arguments, they will be interpreted as sequences of x and y values.
====================== ===============================================================================================
**Keyword Arguments:**
*spots* Optional list of dicts. Each dict specifies parameters for a single spot:
{'pos': (x,y), 'size', 'pen', 'brush', 'symbol'}. This is just an alternate method
of passing in data for the corresponding arguments.
*x*,*y* 1D arrays of x,y values.
*pos* 2D structure of x,y pairs (such as Nx2 array or list of tuples)
*pxMode* If True, spots are always the same size regardless of scaling, and size is given in px.
Otherwise, size is in scene coordinates and the spots scale with the view. To ensure
effective caching, QPen and QBrush objects should be reused as much as possible.
Default is True
*symbol* can be one (or a list) of symbols. For a list of supported symbols, see
:func:`~ScatterPlotItem.setSymbol`. QPainterPath is also supported to specify custom symbol
shapes. To properly obey the position and size, custom symbols should be centered at (0,0) and
width and height of 1.0. Note that it is also possible to 'install' custom shapes by setting
ScatterPlotItem.Symbols[key] = shape.
*pen* The pen (or list of pens) to use for drawing spot outlines.
*brush* The brush (or list of brushes) to use for filling spots.
*size* The size (or list of sizes) of spots. If *pxMode* is True, this value is in pixels. Otherwise,
it is in the item's local coordinate system.
*data* a list of python objects used to uniquely identify each spot.
*hoverable* If True, sigHovered is emitted with a list of hovered points, a tool tip is shown containing
information about them, and an optional separate style for them is used. Default is False.
*tip* A string-valued function of a spot's (x, y, data) values. Set to None to prevent a tool tip
from being shown.
*hoverSymbol* A single symbol to use for hovered spots. Set to None to keep symbol unchanged. Default is None.
*hoverSize* A single size to use for hovered spots. Set to -1 to keep size unchanged. Default is -1.
*hoverPen* A single pen to use for hovered spots. Set to None to keep pen unchanged. Default is None.
*hoverBrush* A single brush to use for hovered spots. Set to None to keep brush unchanged. Default is None.
*useCache* (bool) By default, generated point graphics items are cached to
improve performance. Setting this to False can improve image quality
in certain situations.
*antialias* Whether to draw symbols with antialiasing. Note that if pxMode is True, symbols are
always rendered with antialiasing (since the rendered symbols can be cached, this
incurs very little performance cost)
*compositionMode* If specified, this sets the composition mode used when drawing the
scatter plot (see QPainter::CompositionMode in the Qt documentation).
*name* The name of this item. Names are used for automatically
generating LegendItem entries and by some exporters.
====================== ===============================================================================================
"""
oldData = self.data ## this causes cached pixmaps to be preserved while new data is registered.
self.clear() ## clear out all old data
self.addPoints(*args, **kargs)
def addPoints(self, *args, **kargs):
"""
Add new points to the scatter plot.
Arguments are the same as setData()
"""
## deal with non-keyword arguments
if len(args) == 1:
kargs['spots'] = args[0]
elif len(args) == 2:
kargs['x'] = args[0]
kargs['y'] = args[1]
elif len(args) > 2:
raise Exception('Only accepts up to two non-keyword arguments.')
## convert 'pos' argument to 'x' and 'y'
if 'pos' in kargs:
pos = kargs['pos']
if isinstance(pos, np.ndarray):
kargs['x'] = pos[:,0]
kargs['y'] = pos[:,1]
else:
x = []
y = []
for p in pos:
if isinstance(p, QtCore.QPointF):
x.append(p.x())
y.append(p.y())
else:
x.append(p[0])
y.append(p[1])
kargs['x'] = x
kargs['y'] = y
## determine how many spots we have
if 'spots' in kargs:
numPts = len(kargs['spots'])
elif 'y' in kargs and kargs['y'] is not None:
numPts = len(kargs['y'])
else:
kargs['x'] = []
kargs['y'] = []
numPts = 0
## Clear current SpotItems since the data references they contain will no longer be current
self.data['item'][...] = None
## Extend record array
oldData = self.data
self.data = np.empty(len(oldData)+numPts, dtype=self.data.dtype)
## note that np.empty initializes object fields to None and string fields to ''
self.data[:len(oldData)] = oldData
#for i in range(len(oldData)):
#oldData[i]['item']._data = self.data[i] ## Make sure items have proper reference to new array
newData = self.data[len(oldData):]
newData['size'] = -1 ## indicates to use default size
newData['visible'] = True
if 'spots' in kargs:
spots = kargs['spots']
for i in range(len(spots)):
spot = spots[i]
for k in spot:
if k == 'pos':
pos = spot[k]
if isinstance(pos, QtCore.QPointF):
x,y = pos.x(), pos.y()
else:
x,y = pos[0], pos[1]
newData[i]['x'] = x
newData[i]['y'] = y
elif k == 'pen':
newData[i][k] = _mkPen(spot[k])
elif k == 'brush':
newData[i][k] = _mkBrush(spot[k])
elif k in ['x', 'y', 'size', 'symbol', 'data']:
newData[i][k] = spot[k]
else:
raise Exception("Unknown spot parameter: %s" % k)
elif 'y' in kargs:
newData['x'] = kargs['x']
newData['y'] = kargs['y']
if 'name' in kargs:
self.opts['name'] = kargs['name']
if 'pxMode' in kargs:
self.setPxMode(kargs['pxMode'])
if 'antialias' in kargs:
self.opts['antialias'] = kargs['antialias']
if 'hoverable' in kargs:
self.opts['hoverable'] = bool(kargs['hoverable'])
if 'tip' in kargs:
self.opts['tip'] = kargs['tip']
if 'useCache' in kargs:
self.opts['useCache'] = kargs['useCache']
## Set any extra parameters provided in keyword arguments
for k in ['pen', 'brush', 'symbol', 'size']:
if k in kargs:
setMethod = getattr(self, 'set' + k[0].upper() + k[1:])
setMethod(kargs[k], update=False, dataSet=newData, mask=kargs.get('mask', None))
kh = 'hover' + k.title()
if kh in kargs:
vh = kargs[kh]
if k == 'pen':
vh = _mkPen(vh)
elif k == 'brush':
vh = _mkBrush(vh)
self.opts[kh] = vh
if 'data' in kargs:
self.setPointData(kargs['data'], dataSet=newData)
self.prepareGeometryChange()
self.informViewBoundsChanged()
self.bounds = [None, None]
self.invalidate()
self.updateSpots(newData)
self.sigPlotChanged.emit(self)
def invalidate(self):
## clear any cached drawing state
self.picture = None
self.update()
def getData(self):
return self.data['x'], self.data['y']
def implements(self, interface=None):
ints = ['plotData']
if interface is None:
return ints
return interface in ints
def name(self):
return self.opts.get('name', None)
def setPen(self, *args, **kargs):
"""Set the pen(s) used to draw the outline around each spot.
If a list or array is provided, then the pen for each spot will be set separately.
Otherwise, the arguments are passed to pg.mkPen and used as the default pen for
all spots which do not have a pen explicitly set."""
update = kargs.pop('update', True)
dataSet = kargs.pop('dataSet', self.data)
if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)):
pens = args[0]
if 'mask' in kargs and kargs['mask'] is not None:
pens = pens[kargs['mask']]
if len(pens) != len(dataSet):
raise Exception("Number of pens does not match number of points (%d != %d)" % (len(pens), len(dataSet)))
dataSet['pen'] = list(map(_mkPen, pens))
else:
self.opts['pen'] = _mkPen(*args, **kargs)
dataSet['sourceRect'] = 0
if update:
self.updateSpots(dataSet)
def setBrush(self, *args, **kargs):
"""Set the brush(es) used to fill the interior of each spot.
If a list or array is provided, then the brush for each spot will be set separately.
Otherwise, the arguments are passed to pg.mkBrush and used as the default brush for
all spots which do not have a brush explicitly set."""
update = kargs.pop('update', True)
dataSet = kargs.pop('dataSet', self.data)
if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)):
brushes = args[0]
if 'mask' in kargs and kargs['mask'] is not None:
brushes = brushes[kargs['mask']]
if len(brushes) != len(dataSet):
raise Exception("Number of brushes does not match number of points (%d != %d)" % (len(brushes), len(dataSet)))
dataSet['brush'] = list(map(_mkBrush, brushes))
else:
self.opts['brush'] = _mkBrush(*args, **kargs)
dataSet['sourceRect'] = 0
if update:
self.updateSpots(dataSet)
def setSymbol(self, symbol, update=True, dataSet=None, mask=None):
"""Set the symbol(s) used to draw each spot.
If a list or array is provided, then the symbol for each spot will be set separately.
Otherwise, the argument will be used as the default symbol for
all spots which do not have a symbol explicitly set.
**Supported symbols:**
* 'o' circle (default)
* 's' square
* 't' triangle
* 'd' diamond
* '+' plus
* 't1' triangle pointing upwards
* 't2' triangle pointing right side
* 't3' triangle pointing left side
* 'p' pentagon
* 'h' hexagon
* 'star'
* '|' vertical line
* '_' horizontal line
* 'x' cross
* 'arrow_up'
* 'arrow_right'
* 'arrow_down'
* 'arrow_left'
* 'crosshair'
* any QPainterPath to specify custom symbol shapes.
"""
if dataSet is None:
dataSet = self.data
if isinstance(symbol, np.ndarray) or isinstance(symbol, list):
symbols = symbol
if mask is not None:
symbols = symbols[mask]
if len(symbols) != len(dataSet):
raise Exception("Number of symbols does not match number of points (%d != %d)" % (len(symbols), len(dataSet)))
dataSet['symbol'] = symbols
else:
self.opts['symbol'] = symbol
self._spotPixmap = None
dataSet['sourceRect'] = 0
if update:
self.updateSpots(dataSet)
def setSize(self, size, update=True, dataSet=None, mask=None):
"""Set the size(s) used to draw each spot.
If a list or array is provided, then the size for each spot will be set separately.
Otherwise, the argument will be used as the default size for
all spots which do not have a size explicitly set."""
if dataSet is None:
dataSet = self.data
if isinstance(size, np.ndarray) or isinstance(size, list):
sizes = size
if mask is not None:
sizes = sizes[mask]
if len(sizes) != len(dataSet):
raise Exception("Number of sizes does not match number of points (%d != %d)" % (len(sizes), len(dataSet)))
dataSet['size'] = sizes
else:
self.opts['size'] = size
self._spotPixmap = None
dataSet['sourceRect'] = 0
if update:
self.updateSpots(dataSet)
def setPointsVisible(self, visible, update=True, dataSet=None, mask=None):
"""Set whether or not each spot is visible.
If a list or array is provided, then the visibility for each spot will be set separately.
Otherwise, the argument will be used for all spots."""
if dataSet is None:
dataSet = self.data
if isinstance(visible, np.ndarray) or isinstance(visible, list):
visibilities = visible
if mask is not None:
visibilities = visibilities[mask]
if len(visibilities) != len(dataSet):
raise Exception("Number of visibilities does not match number of points (%d != %d)" % (len(visibilities), len(dataSet)))
dataSet['visible'] = visibilities
else:
dataSet['visible'] = visible
dataSet['sourceRect'] = 0
if update:
self.updateSpots(dataSet)
def setPointData(self, data, dataSet=None, mask=None):
if dataSet is None:
dataSet = self.data
if isinstance(data, np.ndarray) or isinstance(data, list):
if mask is not None:
data = data[mask]
if len(data) != len(dataSet):
raise Exception("Length of meta data does not match number of points (%d != %d)" % (len(data), len(dataSet)))
## Bug: If data is a numpy record array, then items from that array must be copied to dataSet one at a time.
## (otherwise they are converted to tuples and thus lose their field names.
if isinstance(data, np.ndarray) and (data.dtype.fields is not None)and len(data.dtype.fields) > 1:
for i, rec in enumerate(data):
dataSet['data'][i] = rec
else:
dataSet['data'] = data
def setPxMode(self, mode):
if self.opts['pxMode'] == mode:
return
self.opts['pxMode'] = mode
self.invalidate()
def updateSpots(self, dataSet=None):
profiler = debug.Profiler() # noqa: profiler prints on GC
if dataSet is None:
dataSet = self.data
invalidate = False
if self.opts['pxMode'] and self.opts['useCache']:
mask = dataSet['sourceRect']['w'] == 0
if np.any(mask):
invalidate = True
coords = self.fragmentAtlas[
list(zip(*self._style(['symbol', 'size', 'pen', 'brush'], data=dataSet, idx=mask)))
]
dataSet['sourceRect'][mask] = coords
self._maybeRebuildAtlas()
else:
invalidate = True
self._updateMaxSpotSizes(data=dataSet)
if invalidate:
self.invalidate()
def _maybeRebuildAtlas(self, threshold=4, minlen=1000):
n = len(self.fragmentAtlas)
if (n > minlen) and (n > threshold * len(self.data)):
self.fragmentAtlas.rebuild(
list(zip(*self._style(['symbol', 'size', 'pen', 'brush'])))
)
self.data['sourceRect'] = 0
self.updateSpots()
def _style(self, opts, data=None, idx=None, scale=None):
if data is None:
data = self.data
if idx is None:
idx = np.s_[:]
for opt in opts:
col = data[opt][idx]
if col.base is not None:
col = col.copy()
if self.opts['hoverable']:
val = self.opts['hover' + opt.title()]
if val != _DEFAULT_STYLE[opt]:
col[data['hovered'][idx]] = val
col[np.equal(col, _DEFAULT_STYLE[opt])] = self.opts[opt]
if opt == 'size' and scale is not None:
col *= scale
yield col
def _updateMaxSpotSizes(self, **kwargs):
if self.opts['pxMode'] and self.opts['useCache']:
w, pw = 0, self.fragmentAtlas.maxWidth
else:
w, pw = max(itertools.chain([(self._maxSpotWidth, self._maxSpotPxWidth)],
self._measureSpotSizes(**kwargs)))
self._maxSpotWidth = w
self._maxSpotPxWidth = pw
self.bounds = [None, None]
def _measureSpotSizes(self, **kwargs):
"""Generate pairs (width, pxWidth) for spots in data"""
styles = zip(*self._style(['size', 'pen'], **kwargs))
if self.opts['pxMode']:
for size, pen in styles:
yield 0, size + pen.widthF()
else:
for size, pen in styles:
if pen.isCosmetic():
yield size, pen.widthF()
else:
yield size + pen.widthF(), 0
def clear(self):
"""Remove all spots from the scatter plot"""
#self.clearItems()
self._maxSpotWidth = 0
self._maxSpotPxWidth = 0
self.data = np.empty(0, dtype=self.data.dtype)
self.bounds = [None, None]
self.invalidate()
def dataBounds(self, ax, frac=1.0, orthoRange=None):
if frac >= 1.0 and orthoRange is None and self.bounds[ax] is not None:
return self.bounds[ax]
#self.prepareGeometryChange()
if self.data is None or len(self.data) == 0:
return (None, None)
if ax == 0:
d = self.data['x']
d2 = self.data['y']
elif ax == 1:
d = self.data['y']
d2 = self.data['x']
else:
raise ValueError("Invalid axis value")
if orthoRange is not None:
mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])
d = d[mask]
if d.size == 0:
return (None, None)
if frac >= 1.0:
self.bounds[ax] = (np.nanmin(d) - self._maxSpotWidth*0.7072, np.nanmax(d) + self._maxSpotWidth*0.7072)
return self.bounds[ax]
elif frac <= 0.0:
raise Exception("Value for parameter 'frac' must be > 0. (got %s)" % str(frac))
else:
mask = np.isfinite(d)
d = d[mask]
return np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])
def pixelPadding(self):
return self._maxSpotPxWidth*0.7072
def boundingRect(self):
(xmn, xmx) = self.dataBounds(ax=0)
(ymn, ymx) = self.dataBounds(ax=1)
if xmn is None or xmx is None:
xmn = 0
xmx = 0
if ymn is None or ymx is None:
ymn = 0
ymx = 0
px = py = 0.0
pxPad = self.pixelPadding()
if pxPad > 0:
# determine length of pixel in local x, y directions
px, py = self.pixelVectors()
try:
px = 0 if px is None else px.length()
except OverflowError:
px = 0
try:
py = 0 if py is None else py.length()
except OverflowError:
py = 0
# return bounds expanded by pixel size
px *= pxPad
py *= pxPad
return QtCore.QRectF(xmn-px, ymn-py, (2*px)+xmx-xmn, (2*py)+ymx-ymn)
def viewTransformChanged(self):
self.prepareGeometryChange()
GraphicsObject.viewTransformChanged(self)
self.bounds = [None, None]
def setExportMode(self, *args, **kwds):
GraphicsObject.setExportMode(self, *args, **kwds)
self.invalidate()
@debug.warnOnException ## raising an exception here causes crash
def paint(self, p, option, widget):
profiler = debug.Profiler()
cmode = self.opts.get('compositionMode', None)
if cmode is not None:
p.setCompositionMode(cmode)
#p.setPen(fn.mkPen('r'))
#p.drawRect(self.boundingRect())
if self._exportOpts is not False:
aa = self._exportOpts.get('antialias', True)
scale = self._exportOpts.get('resolutionScale', 1.0) ## exporting to image; pixel resolution may have changed
else:
aa = self.opts['antialias']
scale = 1.0
if self.opts['pxMode'] is True:
# Cull points that are outside view
viewMask = self._maskAt(self.viewRect())
# Map points using painter's world transform so they are drawn with pixel-valued sizes
pts = np.vstack([self.data['x'], self.data['y']])
pts = fn.transformCoordinates(p.transform(), pts)
pts = fn.clip_array(pts, -2 ** 30, 2 ** 30) # prevent Qt segmentation fault.
p.resetTransform()
if self.opts['useCache'] and self._exportOpts is False:
# Draw symbols from pre-rendered atlas
dpr = self.fragmentAtlas.devicePixelRatio()
if widget is not None and (dpr_new := widget.devicePixelRatioF()) != dpr:
# force a re-render if dpr changed
dpr = dpr_new
self.fragmentAtlas.setDevicePixelRatio(dpr)
self.fragmentAtlas.clear()
self.data['sourceRect'] = 0
self.updateSpots()
# x, y is the center of the target rect
xy = pts[:, viewMask].T
sr = self.data['sourceRect'][viewMask]
self._pixmapFragments.resize(sr.size)
frags = self._pixmapFragments.ndarray()
frags[:, 0:2] = xy
frags[:, 2:6] = np.frombuffer(sr, dtype=int).reshape((-1, 4)) # sx, sy, sw, sh
frags[:, 6:10] = [1/dpr, 1/dpr, 0.0, 1.0] # scaleX, scaleY, rotation, opacity
profiler('prep')
drawargs = self._pixmapFragments.drawargs()
p.drawPixmapFragments(*drawargs, self.fragmentAtlas.pixmap)
profiler('draw')
else:
# render each symbol individually
p.setRenderHint(p.RenderHint.Antialiasing, aa)
for pt, style in zip(
pts[:, viewMask].T,
zip(*(self._style(['symbol', 'size', 'pen', 'brush'], idx=viewMask, scale=scale)))
):
p.resetTransform()
p.translate(*pt)
drawSymbol(p, *style)
else:
if self.picture is None:
self.picture = QtGui.QPicture()
p2 = QtGui.QPainter(self.picture)
for x, y, style in zip(
self.data['x'],
self.data['y'],
zip(*self._style(['symbol', 'size', 'pen', 'brush'], scale=scale))
):
p2.resetTransform()
p2.translate(x, y)
drawSymbol(p2, *style)
p2.end()
p.setRenderHint(p.RenderHint.Antialiasing, aa)
self.picture.play(p)
def points(self):
m = np.equal(self.data['item'], None)
for i in np.argwhere(m)[:, 0]:
rec = self.data[i]
if rec['item'] is None:
rec['item'] = SpotItem(rec, self, i)
return self.data['item']
def pointsAt(self, pos):
return self.points()[self._maskAt(pos)][::-1]
def _maskAt(self, obj):
"""
Return a boolean mask indicating all points that overlap obj, a QPointF or QRectF.
"""
if isinstance(obj, QtCore.QPointF):
l = r = obj.x()
t = b = obj.y()
elif isinstance(obj, QtCore.QRectF):
l = obj.left()
r = obj.right()
t = obj.top()
b = obj.bottom()
else:
raise TypeError
if self.opts['pxMode'] and self.opts['useCache']:
w = self.data['sourceRect']['w']
h = self.data['sourceRect']['h']
else:
s, = self._style(['size'])
w = h = s
w = w / 2
h = h / 2
if self.opts['pxMode']:
# determine length of pixel in local x, y directions
px, py = self.pixelVectors()
try:
px = 0 if px is None else px.length()
except OverflowError:
px = 0
try:
py = 0 if py is None else py.length()
except OverflowError:
py = 0
w *= px
h *= py
return (self.data['visible']
& (self.data['x'] + w > l)
& (self.data['x'] - w < r)
& (self.data['y'] + h > t)
& (self.data['y'] - h < b))
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.MouseButton.LeftButton:
pts = self.pointsAt(ev.pos())
if len(pts) > 0:
self.ptsClicked = pts
ev.accept()
self.sigClicked.emit(self, self.ptsClicked, ev)
else:
#print "no spots"
ev.ignore()
else:
ev.ignore()
    def hoverEvent(self, ev):
        """Track which points lie under the cursor, refresh their styling if
        hover styling is configured, update the tool tip, and emit
        ``sigHovered``.  Only active when the ``hoverable`` option is set.
        """
        if self.opts['hoverable']:
            old = self.data['hovered']
            if ev.exit:
                # Cursor left the item entirely: nothing is hovered any more.
                new = np.zeros_like(self.data['hovered'])
            else:
                # Boolean mask of the points under the cursor position.
                new = self._maskAt(ev.pos())
            if self._hasHoverStyle():
                # Zero the cached source rects of every point whose hover
                # state changed (old XOR new) so updateSpots() rebuilds them
                # with the correct style.  Note `old` is read before
                # 'hovered' is overwritten on the next line.
                self.data['sourceRect'][old ^ new] = 0
                self.data['hovered'] = new
                self.updateSpots()
            # Hovered points, front-most (last-drawn) first.
            points = self.points()[new][::-1]

            # Show information about hovered points in a tool tip
            vb = self.getViewBox()
            if vb is not None and self.opts['tip'] is not None:
                if len(points) > 0:
                    # Show at most `cutoff` entries, then summarize the rest.
                    cutoff = 3
                    tip = [self.opts['tip'](x=pt.pos().x(), y=pt.pos().y(), data=pt.data())
                           for pt in points[:cutoff]]
                    if len(points) > cutoff:
                        tip.append('({} others...)'.format(len(points) - cutoff))
                    vb.setToolTip('\n\n'.join(tip))
                    self._toolTipCleared = False
                elif not self._toolTipCleared:
                    # Clear the tool tip once after the last point is left,
                    # then remember that it is cleared to avoid re-setting it.
                    vb.setToolTip("")
                    self._toolTipCleared = True
            self.sigHovered.emit(self, points, ev)
def _hasHoverStyle(self):
return any(self.opts['hover' + opt.title()] != _DEFAULT_STYLE[opt]
for opt in ['symbol', 'size', 'pen', 'brush'])
| ScatterPlotItem |
python | huggingface__transformers | src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py | {
"start": 1654,
"end": 4575
} | class ____(ImagesKwargs, total=False):
r"""
apply_ocr (`bool`, *optional*, defaults to `True`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
the `apply_ocr` parameter in the `preprocess` method.
ocr_lang (`str`, *optional*):
The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
tesseract_config (`str`, *optional*):
Any additional custom configuration flags that are forwarded to the `config` parameter when calling
Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
`preprocess` method.
"""
apply_ocr: bool
ocr_lang: Optional[str]
tesseract_config: Optional[str]
def normalize_box(box, width, height):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
def apply_tesseract(
image: np.ndarray,
lang: Optional[str],
tesseract_config: Optional[str],
input_data_format: Optional[Union[ChannelDimension, str]] = None,
):
"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
# apply OCR
pil_image = to_pil_image(image, input_data_format=input_data_format)
image_width, image_height = pil_image.size
data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left, top, width, height):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box)
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box, image_width, image_height))
assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
return words, normalized_boxes
@requires(backends=("vision",))
| LayoutLMv3ImageProcessorKwargs |
python | scrapy__scrapy | tests/test_contracts.py | {
"start": 984,
"end": 1154
} | class ____(Contract):
name = "custom_fail_contract"
def adjust_request_args(self, args):
raise TypeError("Error in adjust_request_args")
| CustomFailContract |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 5462,
"end": 7013
} | class ____(
NamedTuple(
"_ResumeRunArgs",
[
# Deprecated, only needed for back-compat since it can be pulled from the DagsterRun
("job_origin", JobPythonOrigin),
("run_id", str),
("instance_ref", Optional[InstanceRef]),
("set_exit_code_on_failure", Optional[bool]),
],
)
):
def __new__(
cls,
job_origin: JobPythonOrigin,
run_id: str,
instance_ref: Optional[InstanceRef],
set_exit_code_on_failure: Optional[bool] = None,
):
return super().__new__(
cls,
job_origin=check.inst_param(
job_origin,
"job_origin",
JobPythonOrigin,
),
run_id=check.str_param(run_id, "run_id"),
instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef),
set_exit_code_on_failure=(
True
if check.opt_bool_param(set_exit_code_on_failure, "set_exit_code_on_failure")
is True
else None
), # for back-compat
)
def get_command_args(self) -> Sequence[str]:
return [
*_get_entry_point(self.job_origin),
"api",
"resume_run",
serialize_value(self),
]
@whitelist_for_serdes(
storage_name="ExecuteExternalPipelineArgs",
storage_field_names={
"job_origin": "pipeline_origin",
"run_id": "pipeline_run_id",
},
)
| ResumeRunArgs |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_bigquery.py | {
"start": 2889,
"end": 34589
} | class ____(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
"bigquery", "v2", http=mock_authorize.return_value, cache_discovery=False
)
mock_bigquery_connection.assert_called_once_with(
service=mock_build.return_value,
project_id=PROJECT_ID,
hook=self.hook,
use_legacy_sql=self.hook.use_legacy_sql,
location=self.hook.location,
num_retries=self.hook.num_retries,
)
assert mock_bigquery_connection.return_value == result
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_true(self, mock_client):
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_false(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_true(self, mock_client):
mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
mock_client.return_value.list_partitions.return_value = []
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.read_gbq")
@pytest.mark.parametrize("df_type", ["pandas", "polars"])
def test_get_df(self, mock_read_gbq, df_type):
import pandas as pd
import polars as pl
mock_read_gbq.return_value = pd.DataFrame({"a": [1, 2, 3]})
result = self.hook.get_df("select 1", df_type=df_type)
expected_type = pd.DataFrame if df_type == "pandas" else pl.DataFrame
assert isinstance(result, expected_type)
assert result.shape == (3, 1)
assert result.columns == ["a"]
if df_type == "pandas":
assert result["a"].tolist() == [1, 2, 3]
else:
assert result.to_series().to_list() == [1, 2, 3]
mock_read_gbq.assert_called_once_with(
"select 1", credentials=CREDENTIALS, dialect="legacy", project_id=PROJECT_ID
)
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
def test_duplication_check(self):
key_one = True
with pytest.raises(
ValueError,
match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
r" `query` config and key_one was also provided with arg to run_query\(\) method. "
r"Please remove duplicates.",
):
_api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
src_fmt_configs = {"test_config_unknown": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
# This config should raise a value error.
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert "test_config_known" in src_fmt_configs, (
"src_fmt_configs should contain al known src_fmt_configs"
)
assert "compatibility_val" in src_fmt_configs, (
"_validate_src_fmt_configs should add backward_compatibility config"
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
mock_row_iterator = _EmptyRowIterator()
mock_client.return_value.list_rows.return_value = mock_row_iterator
for return_iterator, expected in [(False, []), (True, mock_row_iterator)]:
actual = self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
selected_fields=["field_1", "field_2"],
page_token="page123",
start_index=5,
location=LOCATION,
return_iterator=return_iterator,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_schema.assert_has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
selected_fields=mock.ANY,
page_token="page123",
start_index=5,
retry=DEFAULT_RETRY,
)
assert actual == expected
mock_table.from_api_repr.reset_mock()
mock_client.return_value.list_rows.reset_mock()
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
mock_row_iterator = _EmptyRowIterator()
mock_client.return_value.list_rows.return_value = mock_row_iterator
for return_iterator, expected in [(False, []), (True, mock_row_iterator)]:
actual = self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
page_token="page123",
selected_fields=[],
start_index=5,
location=LOCATION,
return_iterator=return_iterator,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
page_token="page123",
selected_fields=None,
start_index=5,
retry=DEFAULT_RETRY,
)
assert actual == expected
mock_table.from_api_repr.reset_mock()
mock_client.return_value.list_rows.reset_mock()
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_create_new_table(self, mock_get, mock_create):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = []
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_create.assert_called_once_with(
dataset_id=DATASET_ID, table_id=TABLE_ID, table_resource=table_resource, project_id=PROJECT_ID
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_already_exists(self, mock_get, mock_update):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = [{"tableId": TABLE_ID}]
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_update.assert_called_once_with(table_resource=table_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={"projectId": PROJECT_ID, "datasetId": view_dataset, "tableId": view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = []
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert any(
entry.role == view_access.role
and entry.entity_type == view_access.entity_type
and entry.entity_id == view_access.entity_id
for entry in dataset.access_entries
), f"View access entry not found in {dataset.access_entries}"
mock_update.assert_called_once_with(
fields=["access"],
dataset_resource=dataset.to_api_repr(),
project_id=PROJECT_ID,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={"projectId": PROJECT_ID, "datasetId": view_dataset, "tableId": view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = [view_access]
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert len(mock_update.calls) == 0
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_poll_job_complete(self, mock_client):
self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
mock_client.return_value.get_job.assert_called_once_with(job_id=JOB_ID)
mock_client.return_value.get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("tenacity.nap.time.sleep", mock.MagicMock())
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_get_job_credentials_refresh_error(self, mock_client):
error = "Unable to acquire impersonated credentials"
response_body = "<!DOCTYPE html>\n<html lang=en>\n <meta charset=utf-8>\n"
mock_job = mock.MagicMock(
job_id="123456_hash",
error_result=False,
state="PENDING",
done=lambda: False,
)
mock_client.return_value.get_job.side_effect = [RefreshError(error, response_body), mock_job]
job = self.hook.get_job(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
mock_client.assert_any_call(location=LOCATION, project_id=PROJECT_ID)
assert mock_client.call_count == 2
assert job == mock_job
@pytest.mark.parametrize(
"error",
[
RefreshError("Other error", "test body"),
ValueError(),
],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_get_job_credentials_error(self, mock_client, error):
mock_client.return_value.get_job.side_effect = error
with pytest.raises(type(error)):
self.hook.get_job(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_schema(self, mock_client):
table = {
"tableReference": TABLE_REFERENCE_REPR,
"schema": {
"fields": [
{"name": "id", "type": "STRING", "mode": "REQUIRED"},
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
]
},
}
mock_client.return_value.get_table.return_value = Table.from_api_repr(table)
result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
assert "fields" in result
assert len(result["fields"]) == 2
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{
"name": "salary",
"type": "INTEGER",
"mode": "REQUIRED",
"policyTags": {"names": ["sensitive"]},
},
{"name": "not_changed", "type": "INTEGER", "mode": "REQUIRED"},
{
"name": "subrecord",
"type": "RECORD",
"mode": "REQUIRED",
"fields": [
{
"name": "field_1",
"type": "STRING",
"mode": "REQUIRED",
"policyTags": {"names": ["sensitive"]},
},
],
},
]
}
schema_fields_updates = [
{"name": "emp_name", "description": "Name of employee", "policyTags": {"names": ["sensitive"]}},
{
"name": "salary",
"description": "Monthly salary in USD",
"policyTags": {},
},
{
"name": "subrecord",
"description": "Some Desc",
"fields": [
{"name": "field_1", "description": "Some nested desc"},
],
},
]
expected_result_schema = {
"fields": [
{
"name": "emp_name",
"type": "STRING",
"mode": "REQUIRED",
"description": "Name of employee",
"policyTags": {"names": ["sensitive"]},
},
{
"name": "salary",
"type": "INTEGER",
"mode": "REQUIRED",
"description": "Monthly salary in USD",
"policyTags": {},
},
{"name": "not_changed", "type": "INTEGER", "mode": "REQUIRED"},
{
"name": "subrecord",
"type": "RECORD",
"mode": "REQUIRED",
"description": "Some Desc",
"fields": [
{
"name": "field_1",
"type": "STRING",
"mode": "REQUIRED",
"description": "Some nested desc",
"policyTags": {"names": ["sensitive"]},
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=True,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={"schema": expected_result_schema},
fields=["schema"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "REQUIRED"},
{"name": "not_changed", "type": "INTEGER", "mode": "REQUIRED"},
{
"name": "subrecord",
"type": "RECORD",
"mode": "REQUIRED",
"fields": [
{"name": "field_1", "type": "STRING", "mode": "REQUIRED"},
],
},
]
}
schema_fields_updates = [
{"name": "emp_name", "description": "Name of employee"},
{
"name": "salary",
"description": "Monthly salary in USD",
"policyTags": {"names": ["sensitive"]},
},
{
"name": "subrecord",
"description": "Some Desc",
"fields": [
{"name": "field_1", "description": "Some nested desc"},
],
},
]
expected_result_schema = {
"fields": [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED", "description": "Name of employee"},
{
"name": "salary",
"type": "INTEGER",
"mode": "REQUIRED",
"description": "Monthly salary in USD",
},
{"name": "not_changed", "type": "INTEGER", "mode": "REQUIRED"},
{
"name": "subrecord",
"type": "RECORD",
"mode": "REQUIRED",
"description": "Some Desc",
"fields": [
{
"name": "field_1",
"type": "STRING",
"mode": "REQUIRED",
"description": "Some nested desc",
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=False,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={"schema": expected_result_schema},
fields=["schema"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
self.hook.insert_all(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.return_value.insert_rows.assert_called_once_with(
table=mock_client.return_value.get_table.return_value,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_fail(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
mock_client.return_value.insert_rows.return_value = ["some", "errors"]
with pytest.raises(AirflowException, match="insert error"):
self.hook.insert_all(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
)
@pytest.mark.parametrize("nowait", [True, False])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_insert_job(self, mock_client, mock_query_job, nowait):
job_conf = {
"query": {
"query": "SELECT * FROM test",
"useLegacySql": "False",
}
}
mock_query_job._JOB_TYPE = "query"
self.hook.insert_job(
configuration=job_conf, job_id=JOB_ID, project_id=PROJECT_ID, location=LOCATION, nowait=nowait
)
mock_client.assert_called_once_with(
project_id=PROJECT_ID,
location=LOCATION,
)
mock_query_job.from_api_repr.assert_called_once_with(
{
"configuration": job_conf,
"jobReference": {"jobId": JOB_ID, "projectId": PROJECT_ID, "location": LOCATION},
},
mock_client.return_value,
)
if nowait:
mock_query_job.from_api_repr.return_value._begin.assert_called_once()
mock_query_job.from_api_repr.return_value.result.assert_not_called()
else:
mock_query_job.from_api_repr.return_value._begin.assert_not_called()
mock_query_job.from_api_repr.return_value.result.assert_called_once()
def test_dbapi_get_uri(self):
assert self.hook.get_uri().startswith("bigquery://")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.md5")
@pytest.mark.parametrize(
("test_dag_id", "expected_job_id"),
[("test-dag-id-1.1", "airflow_test_dag_id_1_1_test_job_id_2020_01_23T00_00_00_hash")],
ids=["test-dag-id-1.1"],
)
def test_job_id_validity(self, mock_md5, test_dag_id, expected_job_id):
hash_ = "hash"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
job_id = self.hook.generate_job_id(
job_id=None,
dag_id=test_dag_id,
task_id="test_job_id",
logical_date=None,
configuration=configuration,
run_after=datetime(2020, 1, 23),
)
assert job_id == expected_job_id
def test_get_run_after_or_logical_date(self):
"""Test get_run_after_or_logical_date for both Airflow 3.x and pre-3.0 behavior."""
if AIRFLOW_V_3_0_PLUS:
ctx = Context(
dag_run=DagRun(
run_type=DagRunType.MANUAL,
start_date=pendulum.datetime(2025, 2, 2, tz="UTC"),
),
logical_date=pendulum.datetime(2025, 1, 1, tz="UTC"),
)
assert self.hook.get_run_after_or_logical_date(ctx) == pendulum.datetime(2025, 2, 2, tz="UTC")
ctx = Context(
dag_run=DagRun(
run_type=DagRunType.SCHEDULED,
start_date=pendulum.datetime(2025, 2, 2, tz="UTC"),
),
logical_date=pendulum.datetime(2025, 1, 1, tz="UTC"),
)
assert self.hook.get_run_after_or_logical_date(ctx) == pendulum.datetime(2025, 2, 2, tz="UTC")
else:
ctx = Context(
dag_run=DagRun(
run_type=DagRunType.MANUAL,
start_date=pendulum.datetime(2025, 2, 2, tz="UTC"),
),
logical_date=pendulum.datetime(2025, 1, 1, tz="UTC"),
)
assert self.hook.get_run_after_or_logical_date(ctx) == pendulum.datetime(2025, 1, 1, tz="UTC")
ctx = Context(
dag_run=DagRun(
run_type=DagRunType.SCHEDULED,
start_date=pendulum.datetime(2025, 2, 2, tz="UTC"),
),
logical_date=pendulum.datetime(2025, 1, 1, tz="UTC"),
)
assert self.hook.get_run_after_or_logical_date(ctx) == pendulum.datetime(2025, 2, 2, tz="UTC")
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_job",
return_value=mock.MagicMock(spec=CopyJob),
)
def test_query_results__not_query_job_exception(self, _):
with pytest.raises(AirflowException, match="query job"):
self.hook.get_query_results(job_id=JOB_ID, location=LOCATION)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_job",
return_value=mock.MagicMock(spec=QueryJob, state="RUNNING"),
)
def test_query_results__job_not_done_exception(self, _):
with pytest.raises(AirflowException, match="DONE state"):
self.hook.get_query_results(job_id=JOB_ID, location=LOCATION)
@pytest.mark.parametrize(
("selected_fields", "result"),
[
(None, [{"a": 1, "b": 2}, {"a": 3, "b": 4}]),
("a", [{"a": 1}, {"a": 3}]),
("a,b", [{"a": 1, "b": 2}, {"a": 3, "b": 4}]),
("b,a", [{"a": 1, "b": 2}, {"a": 3, "b": 4}]),
],
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_job",
return_value=mock.MagicMock(
spec=QueryJob,
state="DONE",
result=mock.MagicMock(return_value=[{"a": 1, "b": 2}, {"a": 3, "b": 4}]),
),
)
def test_query_results(self, _, selected_fields, result):
assert (
self.hook.get_query_results(job_id=JOB_ID, location=LOCATION, selected_fields=selected_fields)
== result
)
def test_split_tablename_internal_need_default_project(self):
with pytest.raises(ValueError, match="INTERNAL: No default project is specified"):
self.hook.split_tablename("dataset.table", None)
@pytest.mark.parametrize(
("project_expected", "dataset_expected", "table_expected", "table_input"),
[
("project", "dataset", "table", "dataset.table"),
("alternative", "dataset", "table", "alternative:dataset.table"),
("alternative", "dataset", "table", "alternative.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
],
)
def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
default_project_id = "project"
project, dataset, table = self.hook.split_tablename(table_input, default_project_id)
assert project_expected == project
assert dataset_expected == dataset
assert table_expected == table
@pytest.mark.parametrize(
("table_input", "var_name", "exception_message"),
[
("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
(
"alt1.alt.dataset.table",
None,
r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
),
(
"alt1:alt2:alt.dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1:alt2:alt:dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1.alt.dataset.table",
"var_x",
r"Format exception for var_x: Expect format of "
r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
),
],
)
def test_split_tablename_invalid_syntax(self, table_input, var_name, exception_message):
default_project_id = "project"
with pytest.raises(ValueError, match=exception_message.format(table_input)):
self.hook.split_tablename(table_input, default_project_id, var_name)
@pytest.mark.db_test
| TestBigQueryHookMethods |
python | ipython__ipython | tests/test_oinspect.py | {
"start": 3242,
"end": 3531
} | class ____(object):
"""This is the class docstring."""
def __init__(self, x, y=1):
"""This is the constructor docstring."""
def __call__(self, *a, **kw):
"""This is the call docstring."""
def method(self, x, z=2):
"""Some method's docstring"""
| Call |
python | pypa__pipenv | pipenv/vendor/click/parser.py | {
"start": 6770,
"end": 7822
} | class ____:
def __init__(self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1):
self.dest = dest
self.nargs = nargs
self.obj = obj
def process(
self,
value: t.Union[t.Optional[str], t.Sequence[t.Optional[str]]],
state: "ParsingState",
) -> None:
if self.nargs > 1:
assert value is not None
holes = sum(1 for x in value if x is None)
if holes == len(value):
value = None
elif holes != 0:
raise BadArgumentUsage(
_("Argument {name!r} takes {nargs} values.").format(
name=self.dest, nargs=self.nargs
)
)
if self.nargs == -1 and self.obj.envvar is not None and value == ():
# Replace empty tuple with None so that a value from the
# environment may be tried.
value = None
state.opts[self.dest] = value # type: ignore
state.order.append(self.obj)
| Argument |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 129519,
"end": 130013
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("user_id", "organization_id", "client_mutation_id")
user_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="userId")
organization_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="organizationId"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| RemoveOutsideCollaboratorInput |
python | getsentry__sentry | src/sentry/notifications/utils/__init__.py | {
"start": 18507,
"end": 21039
} | class ____(PerformanceProblemContext):
def to_dict(self) -> dict[str, Any]:
return {
"span_evidence_key_value": [
{"key": _("Transaction"), "value": self.transaction},
{"key": _("Starting Span"), "value": self.starting_span},
{
"key": _("Parallelizable Spans"),
"value": self.parallelizable_spans,
"is_multi_value": True,
},
],
"transaction_duration": self.transaction_duration,
"slow_span_duration": self.time_saved,
}
@property
def starting_span(self) -> str:
if not self.problem.cause_span_ids or len(self.problem.cause_span_ids) < 1:
return ""
starting_span_id = self.problem.cause_span_ids[0]
return self._find_span_desc_by_id(starting_span_id)
@property
def parallelizable_spans(self) -> list[str]:
if not self.problem.offender_span_ids or len(self.problem.offender_span_ids) < 1:
return [""]
offender_span_ids = self.problem.offender_span_ids
return [self._find_span_desc_by_id(id) for id in offender_span_ids]
def _find_span_desc_by_id(self, id: str) -> str:
return get_span_evidence_value(self._find_span_by_id(id))
@property
def time_saved(self) -> float:
"""
Calculates the cost saved by running spans in parallel,
this is the maximum time saved of running all independent queries in parallel
note, maximum means it does not account for db connection times and overhead associated with parallelization,
this is where thresholds come in
"""
independent_spans = [self._find_span_by_id(id) for id in self.problem.offender_span_ids]
consecutive_spans = [self._find_span_by_id(id) for id in self.problem.cause_span_ids or ()]
total_duration = self._sum_span_duration(consecutive_spans)
max_independent_span_duration = max(
[self.get_span_duration(span).total_seconds() * 1000 for span in independent_spans]
)
sum_of_dependent_span_durations = 0.0
for span in consecutive_spans:
if span not in independent_spans:
sum_of_dependent_span_durations += (
self.get_span_duration(span).total_seconds() * 1000
)
return total_duration - max(max_independent_span_duration, sum_of_dependent_span_durations)
| ConsecutiveDBQueriesProblemContext |
python | getsentry__sentry | src/sentry/api/event_search.py | {
"start": 26128,
"end": 64769
} | class ____(NodeVisitor[list[QueryToken]]):
# `tuple[...]` is used for the typing of `children` because there isn't
# a way to represent positional-heterogenous lists -- but they are
# actually lists
unwrapped_exceptions = (InvalidSearchQuery, IncompatibleMetricsQuery)
def __init__(
self,
config: SearchConfig[Any],
params: ParamsType | None = None,
get_field_type: Callable[[str], str | None] | None = None,
get_function_result_type: Callable[[str], str | None] | None = None,
) -> None:
super().__init__()
self.config = config
if TYPE_CHECKING:
from sentry.search.events.builder.discover import UnresolvedQuery
@functools.cache
def _get_fallback_builder() -> UnresolvedQuery:
# Avoid circular import
from sentry.search.events.builder.discover import UnresolvedQuery
# TODO: read dataset from config
return UnresolvedQuery(
dataset=Dataset.Discover,
params=params if params is not None else {},
config=QueryBuilderConfig(functions_acl=list(FUNCTIONS)),
)
if get_field_type is not None:
self.get_field_type = get_field_type
else:
self.get_field_type = _get_fallback_builder().get_field_type
if get_function_result_type is not None:
self.get_function_result_type = get_function_result_type
else:
self.get_function_result_type = _get_fallback_builder().get_function_result_type
@cached_property
def key_mappings_lookup(self) -> dict[str, str]:
return {
source_field: target_field
for target_field, source_fields in self.config.key_mappings.items()
for source_field in source_fields
}
def is_numeric_key(self, key: str) -> bool:
return (
key in self.config.numeric_keys
or is_measurement(key)
or is_span_op_breakdown(key)
or self.get_field_type(key) in ["number", "integer"]
or self.is_duration_key(key)
or self.is_size_key(key)
)
def is_duration_key(self, key: str) -> bool:
duration_types = [*DURATION_UNITS, "duration"]
return (
key in self.config.duration_keys
or is_duration_measurement(key)
or is_span_op_breakdown(key)
or self.get_field_type(key) in duration_types
)
def is_size_key(self, key: str) -> bool:
return self.get_field_type(key) in SIZE_UNITS
def is_date_key(self, key: str) -> bool:
return key in self.config.date_keys
def is_boolean_key(self, key: str) -> bool:
return key in self.config.boolean_keys
def visit_search(
self,
node: Node,
children: tuple[str, Node | _RecursiveList[QueryToken]],
) -> list[QueryToken]:
if isinstance(children[1], Node): # empty search
return []
else:
return remove_optional_nodes(flatten(children[1]))
def visit_term(
self,
node: Node,
children: tuple[_RecursiveList[QueryToken], str],
) -> list[QueryToken]:
return remove_optional_nodes(flatten(children[0]))
def visit_boolean_operator(self, node: Node, children: tuple[QueryOp]) -> QueryOp:
if not self.config.allow_boolean:
raise InvalidSearchQuery(
'Boolean statements containing "OR" or "AND" are not supported in this search'
)
return children[0]
def visit_free_text_unquoted(self, node: Node, children: object) -> str | None:
return node.text.strip(" ") or None
def visit_free_text(self, node: Node, children: tuple[str]) -> SearchFilter | None:
if not children[0]:
return None
# Free text searches need to be treated like they were wildcards
return SearchFilter(
SearchKey(self.config.free_text_key),
"=",
SearchValue(wrap_free_text(children[0], self.config.wildcard_free_text)),
)
def visit_paren_group(
self,
node: Node,
children: tuple[
str,
str,
_RecursiveList[QueryToken],
str,
],
) -> SearchFilter | ParenExpression | list[QueryToken]:
if not self.config.allow_boolean:
# It's possible to have a valid search that includes parens, so we
# can't just error out when we find a paren expression.
return SearchFilter(
SearchKey(self.config.free_text_key),
"=",
SearchValue(wrap_free_text(node.text, self.config.wildcard_free_text)),
)
flattened = remove_optional_nodes(flatten(children[2]))
if len(flattened) == 0:
return []
return ParenExpression(flattened)
# --- Start of filter visitors
def _handle_basic_filter(
self, search_key: SearchKey, operator: str, search_value: SearchValue
) -> SearchFilter:
# If a date or numeric key gets down to the basic filter, then it means
# that the value wasn't in a valid format, so raise here.
if self.is_date_key(search_key.name):
raise InvalidSearchQuery(
f"{search_key.name}: Invalid date: {search_value.raw_value}. Expected +/-duration (e.g. +1h) or ISO 8601-like (e.g. {datetime.now().isoformat()[:-4]})."
)
if self.is_boolean_key(search_key.name):
raise InvalidSearchQuery(
f"{search_key.name}: Invalid boolean: {search_value.raw_value}. Expected true, 1, false, or 0."
)
if self.is_numeric_key(search_key.name):
raise InvalidSearchQuery(
f"{search_key.name}: Invalid number: {search_value.raw_value}. Expected number then optional k, m, or b suffix (e.g. 500k)."
)
return SearchFilter(search_key, operator, search_value)
def _handle_numeric_filter(
self, search_key: SearchKey, operator: str, search_value: tuple[str, str]
) -> SearchFilter:
operator = get_operator_value(operator)
try:
search_value_obj = SearchValue(parse_numeric_value(*search_value))
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
return SearchFilter(search_key, operator, search_value_obj)
def visit_date_filter(
self,
node: Node,
children: tuple[
SearchKey,
Node, # :
str, # operator
str, # datetime value
],
) -> SearchFilter:
(search_key, _, operator, search_value_s) = children
if self.is_date_key(search_key.name):
try:
search_value_dt = parse_datetime_string(search_value_s)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
return SearchFilter(search_key, operator, SearchValue(search_value_dt))
search_value_s = operator + search_value_s if operator != "=" else search_value_s
return self._handle_basic_filter(search_key, "=", SearchValue(search_value_s))
def visit_specific_date_filter(
self,
node: Node,
children: tuple[
SearchKey,
Node, # :
str, # date value
],
) -> SearchFilter | list[SearchFilter]:
# If we specify a specific date, it means any event on that day, and if
# we specify a specific datetime then it means a few minutes interval
# on either side of that datetime
(search_key, _, date_value) = children
if not self.is_date_key(search_key.name):
return self._handle_basic_filter(search_key, "=", SearchValue(date_value))
try:
from_val, to_val = parse_datetime_value(date_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
# TODO: Handle negations here. This is tricky because these will be
# separate filters, and to negate this range we need (< val or >= val).
# We currently AND all filters together, so we'll need extra logic to
# handle. Maybe not necessary to allow negations for this.
return [
SearchFilter(search_key, ">=", SearchValue(from_val[0])),
SearchFilter(search_key, "<", SearchValue(to_val[0])),
]
def visit_rel_date_filter(
self,
node: Node,
children: tuple[
SearchKey,
Node, # :
Node, # date filter value
],
) -> SearchFilter:
(search_key, _, value) = children
if self.is_date_key(search_key.name):
try:
dt_range = parse_datetime_range(value.text)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
# TODO: Handle negations
if dt_range[0] is not None:
operator = ">="
search_value = dt_range[0][0]
else:
operator = "<="
search_value = dt_range[1][0]
return SearchFilter(search_key, operator, SearchValue(search_value))
return self._handle_basic_filter(search_key, "=", SearchValue(value.text))
def visit_duration_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
Node | tuple[str], # operator if present
tuple[str, str], # value and unit
],
) -> SearchFilter:
(negation, search_key, _, operator, search_value) = children
if self.is_duration_key(search_key.name) or self.is_numeric_key(search_key.name):
operator_s = handle_negation(negation, operator)
else:
operator_s = get_operator_value(operator)
if self.is_duration_key(search_key.name):
try:
search_value_f = parse_duration(*search_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
return SearchFilter(search_key, operator_s, SearchValue(search_value_f))
# Durations overlap with numeric `m` suffixes
if self.is_numeric_key(search_key.name):
return self._handle_numeric_filter(search_key, operator_s, search_value)
search_value_s = "".join(search_value)
search_value_s = (
operator_s + search_value_s if operator_s not in ("=", "!=") else search_value_s
)
operator_s = "!=" if is_negated(negation) else "="
return self._handle_basic_filter(search_key, operator_s, SearchValue(search_value_s))
def visit_size_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
Node | tuple[str], # operator if present
tuple[str, str], # value and unit
],
) -> SearchFilter:
(negation, search_key, _, operator, search_value) = children
# The only size keys we have are custom measurements right now
if self.is_size_key(search_key.name):
operator_s = handle_negation(negation, operator)
else:
operator_s = get_operator_value(operator)
if self.is_size_key(search_key.name):
search_value_f = parse_size(*search_value)
return SearchFilter(search_key, operator_s, SearchValue(search_value_f))
search_value_s = "".join(search_value)
search_value_s = (
operator_s + search_value_s if operator_s not in ("=", "!=") else search_value_s
)
operator_s = "!=" if is_negated(negation) else "="
return self._handle_basic_filter(search_key, operator_s, SearchValue(search_value_s))
def visit_boolean_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
Node, # boolean value
],
) -> SearchFilter:
(negation, search_key, sep, search_value_node) = children
negated = is_negated(negation)
# Numeric and boolean filters overlap on 1 and 0 values.
if self.is_numeric_key(search_key.name):
return self._handle_numeric_filter(
search_key, "!=" if negated else "=", (search_value_node.text, "")
)
if self.is_boolean_key(search_key.name):
if search_value_node.text.lower() in ("true", "1"):
search_value = SearchValue(0 if negated else 1)
elif search_value_node.text.lower() in ("false", "0"):
search_value = SearchValue(1 if negated else 0)
else:
raise AssertionError(f"unreachable: {search_value_node.text}")
return SearchFilter(search_key, "=", search_value)
search_value = SearchValue(search_value_node.text)
return self._handle_basic_filter(search_key, "=" if not negated else "!=", search_value)
def visit_numeric_in_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
list[tuple[str, str]], # values
],
) -> SearchFilter:
(negation, search_key, _, search_values) = children
operator = handle_negation(negation, "IN")
if self.is_numeric_key(search_key.name):
search_value = SearchValue([parse_numeric_value(*val) for val in search_values])
return SearchFilter(search_key, operator, search_value)
search_value = SearchValue(["".join(value) for value in search_values])
return self._handle_basic_filter(search_key, operator, search_value)
def visit_numeric_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
Node | tuple[str], # operator if present
tuple[str, str], # value and unit
],
) -> SearchFilter:
(negation, search_key, _, operator, raw_search_value) = children
if (
self.is_numeric_key(search_key.name)
or search_key.name in self.config.text_operator_keys
):
operator_s = handle_negation(negation, operator)
else:
operator_s = get_operator_value(operator)
if self.is_numeric_key(search_key.name):
return self._handle_numeric_filter(search_key, operator_s, raw_search_value)
search_value = SearchValue("".join(raw_search_value))
if operator_s not in ("=", "!=") and search_key.name not in self.config.text_operator_keys:
search_value = search_value._replace(raw_value=f"{operator_s}{search_value.raw_value}")
if search_key.name not in self.config.text_operator_keys:
operator_s = "!=" if is_negated(negation) else "="
return self._handle_basic_filter(search_key, operator_s, search_value)
def visit_aggregate_duration_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
AggregateKey,
Node, # :
Node | tuple[str], # operator if present
tuple[str, str], # value and unit
],
) -> AggregateFilter:
(negation, search_key, _, operator, search_value) = children
operator_s = handle_negation(negation, operator)
# Even if the search value matches duration format, only act as
# duration for certain columns
result_type = self.get_function_result_type(search_key.name)
if result_type == "duration" or result_type in DURATION_UNITS:
try:
aggregate_value = parse_duration(*search_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
else:
# Duration overlaps with numeric values with `m` (million vs
# minutes). So we fall through to numeric if it's not a
# duration key
#
# TODO(epurkhiser): Should we validate that the field is
# numeric and do some other fallback if it's not?
try:
aggregate_value = parse_numeric_value(*search_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
return AggregateFilter(search_key, operator_s, SearchValue(aggregate_value))
def visit_aggregate_size_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
AggregateKey,
Node, # :
Node | tuple[str], # operator if present
tuple[str, str], # value + unit
],
) -> AggregateFilter:
(negation, search_key, _, operator, search_value) = children
operator_s = handle_negation(negation, operator)
aggregate_value = parse_size(*search_value)
return AggregateFilter(search_key, operator_s, SearchValue(aggregate_value))
def visit_aggregate_percentage_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
AggregateKey,
Node, # :
Node | tuple[str], # operator if present
str, # percentage value
],
) -> AggregateFilter:
(negation, search_key, _, operator, search_value) = children
operator_s = handle_negation(negation, operator)
# Even if the search value matches percentage format, only act as
# percentage for certain columns
result_type = self.get_function_result_type(search_key.name)
if result_type == "percentage":
aggregate_value = parse_percentage(search_value)
return AggregateFilter(search_key, operator_s, SearchValue(aggregate_value))
# Invalid formats fall back to text match
search_value = operator_s + search_value if operator_s != "=" else search_value
return AggregateFilter(search_key, "=", SearchValue(search_value))
def visit_aggregate_numeric_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
AggregateKey,
Node, # :
Node | tuple[str], # operator if present
tuple[str, str], # value
],
) -> AggregateFilter:
(negation, search_key, _, operator, search_value) = children
operator_s = handle_negation(negation, operator)
aggregate_value = parse_numeric_value(*search_value)
return AggregateFilter(search_key, operator_s, SearchValue(aggregate_value))
def visit_aggregate_date_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
AggregateKey,
Node, # :
Node | tuple[str], # operator if present
str, # value
],
) -> AggregateFilter:
(negation, search_key, _, operator, search_value) = children
operator_s = handle_negation(negation, operator)
is_date_aggregate = any(key in search_key.name for key in self.config.date_keys)
if is_date_aggregate:
try:
search_value_dt = parse_datetime_string(search_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
return AggregateFilter(search_key, operator_s, SearchValue(search_value_dt))
# Invalid formats fall back to text match
search_value = operator_s + search_value if operator_s != "=" else search_value
return AggregateFilter(search_key, "=", SearchValue(search_value))
def visit_aggregate_rel_date_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
AggregateKey,
Node, # :
Node | tuple[str], # operator if present
Node, # value
],
) -> AggregateFilter:
(negation, search_key, _, operator, search_value) = children
operator_s = handle_negation(negation, operator)
is_date_aggregate = any(key in search_key.name for key in self.config.date_keys)
if is_date_aggregate:
try:
dt_range = parse_datetime_range(search_value.text)
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
if dt_range[0] is not None:
operator_s = ">="
search_value_dt = dt_range[0][0]
else:
operator_s = "<="
search_value_dt = dt_range[1][0]
return AggregateFilter(search_key, operator_s, SearchValue(search_value_dt))
# Invalid formats fall back to text match
search_value_s = operator_s + search_value.text if operator_s != "=" else search_value.text
return AggregateFilter(search_key, "=", SearchValue(search_value_s))
def visit_has_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
Node, # has: lookahead
SearchKey, # SearchKey('has')
Node, # :
tuple[SearchKey],
],
) -> SearchFilter:
# the key is has here, which we don't need
negation, _, _, _, (search_key,) = children
# Some datasets do not support the !has filter, but we allow
# team_key_transaction because we control that field and special
# case the way it's processed in search
if (
not self.config.allow_not_has_filter
and is_negated(negation)
and search_key.name != TEAM_KEY_TRANSACTION_ALIAS
):
raise IncompatibleMetricsQuery(NOT_HAS_FILTER_ERROR_MESSAGE)
# if it matched search value instead, it's not a valid key
if isinstance(search_key, SearchValue):
raise InvalidSearchQuery(
'Invalid format for "has" search: was expecting a field or tag instead'
)
operator = "=" if is_negated(negation) else "!="
return SearchFilter(search_key, operator, SearchValue(""))
def visit_is_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
Node, # is: lookahead
SearchKey, # SearchKey('is')
Node, # :
SearchValue,
],
) -> SearchFilter:
negation, _, _, _, search_value = children
translators = self.config.is_filter_translation
if not translators:
raise InvalidSearchQuery('"is:" queries are not supported in this search.')
if search_value.raw_value.startswith("["): # type: ignore[union-attr] # in progress fixing the value type here
raise InvalidSearchQuery('"in" syntax invalid for "is" search')
if search_value.raw_value not in translators:
valid_keys = sorted(translators.keys())
raise InvalidSearchQuery(
f'Invalid value for "is" search, valid values are {valid_keys}'
)
search_key_s, search_value_v = translators[search_value.raw_value] # type: ignore[index] # in progress fixing the value type here
operator = "!=" if is_negated(negation) else "="
search_key = SearchKey(search_key_s)
search_value = SearchValue(search_value_v)
return SearchFilter(search_key, operator, search_value)
def visit_text_in_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
Node | Sequence[Node], # wildcard_op if present
list[str],
],
) -> SearchFilter:
(negation, search_key, _sep, wildcard_op, search_value_lst) = children
operator = "IN"
search_value = SearchValue(search_value_lst)
operator = handle_negation(negation, operator)
if has_wildcard_op(wildcard_op) and isinstance(search_value.raw_value, list):
wildcarded_values = []
found_wildcard_op = get_wildcard_op(wildcard_op)
for value in search_value.raw_value:
if isinstance(value, str):
wildcarded_values.append(gen_wildcard_value(value, found_wildcard_op))
search_value = search_value._replace(raw_value=wildcarded_values)
return self._handle_basic_filter(search_key, operator, search_value)
def visit_text_filter(
self,
node: Node,
children: tuple[
Node | tuple[Node], # ! if present
SearchKey,
Node, # :
Node | Sequence[Node], # wildcard_op if present
Node | tuple[str], # operator if present
SearchValue,
],
) -> SearchFilter:
(negation, search_key, _sep, wildcard_op, operator, search_value) = children
operator_s = get_operator_value(operator)
# XXX: We check whether the text in the node itself is actually empty, so
# we can tell the difference between an empty quoted string and no string
if not search_value.raw_value and not node.children[5].text:
raise InvalidSearchQuery(f"Empty string after '{search_key.name}:'")
if operator_s not in ("=", "!=") and search_key.name not in self.config.text_operator_keys:
# Operators are not supported in text_filter.
# Push it back into the value before handing the negation.
search_value = search_value._replace(raw_value=f"{operator_s}{search_value.raw_value}")
operator_s = "="
operator_s = handle_negation(negation, operator_s)
if has_wildcard_op(wildcard_op) and isinstance(search_value.raw_value, str):
wildcarded_value = gen_wildcard_value(
search_value.raw_value, get_wildcard_op(wildcard_op)
)
search_value = search_value._replace(raw_value=wildcarded_value)
return self._handle_basic_filter(search_key, operator_s, search_value)
# --- End of filter visitors
def visit_key(self, node: Node, children: object) -> str:
return node.text
def visit_escaped_key(self, node: Node, children: object) -> str:
return node.text
def visit_quoted_key(self, node: Node, children: tuple[Node, str, Node]) -> str:
return children[1]
def visit_explicit_tag_key(
self,
node: Node,
children: tuple[
Node, # "tags"
str, # '['
str, # escaped_key
str, # ']'
],
) -> SearchKey:
return SearchKey(f"tags[{children[2]}]")
def visit_explicit_string_tag_key(
self,
node: Node,
children: tuple[
Node, # "tags"
str, # '['
str, # escaped_key
str, # ' '
Node, # ','
str, # ' '
Node, # "string"
str, # ']'
],
) -> SearchKey:
return SearchKey(f"tags[{children[2]},string]")
def visit_explicit_number_tag_key(
self,
node: Node,
children: tuple[
Node, # "tags"
str, # '['
str, # escaped_key
str, # ' '
Node, # ','
str, # ' '
Node, # "number"
str, # ']'
],
) -> SearchKey:
return SearchKey(f"tags[{children[2]},number]")
def visit_explicit_flag_key(
self,
node: Node,
children: tuple[
Node, # "flags"
str, # [
str, # escaped_key
str, # ]
],
) -> SearchKey:
return SearchKey(f"flags[{children[2]}]")
def visit_explicit_string_flag_key(
self,
node: Node,
children: tuple[
Node, # "flags"
str, # '['
str, # escaped_key
str, # ' '
Node, # ','
str, # ' '
Node, # "string"
str, # ']'
],
) -> SearchKey:
return SearchKey(f"flags[{children[2]},string]")
def visit_explicit_number_flag_key(
self,
node: Node,
children: tuple[
Node, # "flags"
str, # '['
str, # escaped_key
str, # ' '
Node, # ','
str, # ' '
Node, # "number"
str, # ']'
],
) -> SearchKey:
return SearchKey(f"flags[{children[2]},number]")
def visit_aggregate_key(
self,
node: Node,
children: tuple[
str, # function name
str, # open paren
str, # space
Node | tuple[list[str]], # args if present
str, # space
str, # close paren
],
) -> AggregateKey:
function_name, open_paren, _, args, _, close_paren = children
if isinstance(args, Node):
args_s = ""
else:
args_s = ", ".join(args[0])
key = "".join([function_name, open_paren, args_s, close_paren])
return AggregateKey(self.key_mappings_lookup.get(key, key))
def visit_function_args(
self,
node: Node,
children: tuple[
str, # value
(Node | tuple[tuple[str, Node, str, Node, tuple[str]], ...]), # no match # repeat
],
) -> list[str]:
if isinstance(children[1], Node):
return [children[0]]
else:
return process_list(children[0], children[1])
def visit_aggregate_param(self, node: Node, children: tuple[str]) -> str:
return children[0]
def visit_raw_aggregate_param(self, node: Node, children: object) -> str:
return node.text
def visit_quoted_aggregate_param(
self,
node: Node,
children: tuple[
Node, # "
Node | _RecursiveList[Node], # content
Node, # "
],
) -> str:
if isinstance(children[1], Node): # empty string
value = ""
else:
value = "".join(node.text for node in flatten(children[1]))
return f'"{value}"'
def visit_explicit_tag_key_aggregate_param(
self,
node: Node,
children: tuple[SearchKey],
) -> str:
return children[0].name
def visit_search_key(self, node: Node, children: tuple[str | SearchKey]) -> SearchKey:
key = children[0]
if (
self.config.allowed_keys
and key not in self.config.allowed_keys
or key in self.config.blocked_keys
):
raise InvalidSearchQuery(f"Invalid key for this search: {key}")
if isinstance(key, SearchKey):
return key
return SearchKey(self.key_mappings_lookup.get(key, key))
def visit_text_key(self, node: Node, children: tuple[SearchKey]) -> SearchKey:
return children[0]
def visit_value(self, node: Node, children: object) -> str:
# A properly quoted value will match the quoted value regex, so any unescaped
# quotes are errors.
value = node.text
idx = value.find('"')
if idx == 0:
raise InvalidSearchQuery(
f"Invalid quote at '{node.text}': quotes must enclose text or be escaped."
)
while idx != -1:
if value[idx - 1] != "\\":
raise InvalidSearchQuery(
f"Invalid quote at '{node.text}': quotes must enclose text or be escaped."
)
value = value[idx + 1 :]
idx = value.find('"')
return node.text.replace('\\"', '"')
def visit_quoted_value(
self,
node: Node,
children: tuple[
Node, # "
Node | _RecursiveList[Node], # content
Node, # "
],
) -> str:
if isinstance(children[1], Node): # empty string
value = ""
else:
value = "".join(node.text for node in flatten(children[1]))
value = value.replace('\\"', '"')
return value
def visit_in_value(self, node: Node, children: object) -> str:
return node.text.replace('\\"', '"')
def visit_text_in_value(self, node: Node, children: tuple[str]) -> str:
return children[0]
def visit_search_value(self, node: Node, children: tuple[str]) -> SearchValue:
return SearchValue(children[0])
def visit_numeric_value(
self,
node: Node,
children: tuple[
Node | list[Node], # sign
str, # value
Node | list[Node], # unit
Node, # terminating lookahead
],
) -> list[str]:
(sign, value, suffix, _) = children
sign_s = sign[0].text if isinstance(sign, list) else ""
suffix_s = suffix[0].text if isinstance(suffix, list) else ""
return [f"{sign_s}{value}", suffix_s]
def visit_boolean_value(self, node: Node, children: object) -> Node:
return node
def visit_text_in_list(
self,
node: Node,
children: tuple[
str, # '['
str, # value
tuple[tuple[str, Node, str, Node, tuple[str]], ...], # repeat
str, # ']'
Node, # terminating lookahead
],
) -> list[str]:
return process_list(children[1], children[2])
def visit_numeric_in_list(
self,
node: Node,
children: tuple[
str, # '['
tuple[str, str], # value
tuple[tuple[str, Node, str, Node, tuple[tuple[str, str]]], ...], # repeat
str, # ']'
Node, # terminating lookahead
],
) -> list[tuple[str, str]]:
return process_list(children[1], children[2])
def visit_iso_8601_date_format(self, node: Node, children: object) -> str:
return node.text
def visit_rel_date_format(self, node: Node, children: object) -> Node:
return node
def visit_duration_format(
self, node: Node, children: tuple[str, tuple[Node], Node]
) -> list[str]:
return [children[0], children[1][0].text]
def visit_size_format(self, node: Node, children: tuple[str, tuple[Node]]) -> list[str]:
return [children[0], children[1][0].text]
def visit_percentage_format(self, node: Node, children: tuple[str, Node]) -> str:
return children[0]
def visit_operator(self, node: Node, children: object) -> str:
return node.text
def visit_or_operator(self, node: Node, children: object) -> str:
return node.text.upper()
def visit_and_operator(self, node: Node, children: object) -> str:
return node.text.upper()
def visit_numeric(self, node: Node, children: object) -> str:
return node.text
def visit_open_paren(self, node: Node, children: object) -> str:
return node.text
def visit_closed_paren(self, node: Node, children: object) -> str:
return node.text
def visit_open_bracket(self, node: Node, children: object) -> str:
return node.text
def visit_closed_bracket(self, node: Node, children: object) -> str:
return node.text
def visit_sep(self, node: Node, children: object) -> Node:
return node
def visit_negation(self, node: Node, children: object) -> Node:
return node
def visit_wildcard_op(self, node: Node, children: object) -> Node:
return node
def visit_comma(self, node: Node, children: object) -> Node:
return node
def visit_spaces(self, node: Node, children: object) -> str:
return " "
def generic_visit(self, node: Node, children: Sequence[Any]) -> Any:
return children or node
default_config = SearchConfig(
duration_keys={"transaction.duration"},
text_operator_keys={SEMVER_ALIAS, SEMVER_BUILD_ALIAS},
# do not put aggregate functions in this list
numeric_keys={
"project_id",
"project.id",
"issue.id",
"stack.colno",
"stack.lineno",
"stack.stack_level",
"transaction.duration",
},
date_keys={
"start",
"end",
"last_seen()",
"time",
"timestamp",
"timestamp.to_hour",
"timestamp.to_day",
"error.received",
},
boolean_keys={
"error.handled",
"error.unhandled",
"error.main_thread",
"stack.in_app",
"is_application",
"symbolicated_in_app",
TEAM_KEY_TRANSACTION_ALIAS,
},
)
@overload
def parse_search_query(
query: str,
*,
config: SearchConfig[Literal[False]],
params: ParamsType | None = None,
get_field_type: Callable[[str], str | None] | None = None,
get_function_result_type: Callable[[str], str | None] | None = None,
) -> Sequence[SearchFilter | AggregateFilter]: ...
@overload
def parse_search_query(
query: str,
*,
config: SearchConfig[Literal[True]] | None = None,
params: ParamsType | None = None,
get_field_type: Callable[[str], str | None] | None = None,
get_function_result_type: Callable[[str], str | None] | None = None,
) -> Sequence[QueryToken]: ...
def parse_search_query(
query: str,
*,
config: SearchConfig[Any] | None = None,
params: ParamsType | None = None,
get_field_type: Callable[[str], str | None] | None = None,
get_function_result_type: Callable[[str], str | None] | None = None,
) -> Sequence[QueryToken]:
if config is None:
config = default_config
try:
tree = event_search_grammar.parse(query)
except IncompleteParseError as e:
idx = e.column()
prefix = query[max(0, idx - 5) : idx]
suffix = query[idx : (idx + 5)]
raise InvalidSearchQuery(
"{} {}".format(
f"Parse error at '{prefix}{suffix}' (column {e.column():d}).",
"This is commonly caused by unmatched parentheses. Enclose any text in double quotes.",
)
)
return SearchVisitor(
config,
params=params,
get_field_type=get_field_type,
get_function_result_type=get_function_result_type,
).visit(tree)
| SearchVisitor |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_gitlab.py | {
"start": 1187,
"end": 10193
} | class ____(TestCase):
factory = RequestFactory()
path = f"{IntegrationClassification.integration_prefix}gitlab/webhook/"
def get_response(self, req: HttpRequest) -> HttpResponse:
return HttpResponse(status=200, content="passthrough")
def get_integration(self) -> Integration:
self.organization = self.create_organization(owner=self.user, region="us")
return self.create_integration(
organization=self.organization,
provider="gitlab",
name="Example Gitlab",
external_id=EXTERNAL_ID,
metadata={
"instance": "example.gitlab.com",
"base_url": "https://example.gitlab.com",
"domain_name": "example.gitlab.com/group-x",
"verify_ssl": False,
"webhook_secret": WEBHOOK_SECRET,
"group_id": 1,
},
)
def run_parser(self, request):
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
return parser.get_response()
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_missing_x_gitlab_token(self) -> None:
self.get_integration()
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_EVENT="lol",
)
response = self.run_parser(request)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert (
response.reason_phrase == "The customer needs to set a Secret Token in their webhook."
)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_invalid_token(self) -> None:
self.get_integration()
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN="wrong",
HTTP_X_GITLAB_EVENT="Push Hook",
)
response = self.run_parser(request)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.reason_phrase == "The customer's Secret Token is malformed."
assert_no_webhook_payloads()
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
@responses.activate
def test_routing_webhook_properly_no_regions(self) -> None:
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN=WEBHOOK_TOKEN,
HTTP_X_GITLAB_EVENT="Push Hook",
)
integration = self.get_integration()
with outbox_context(transaction.atomic(using=router.db_for_write(OrganizationIntegration))):
# Remove all organizations from integration
OrganizationIntegration.objects.filter(integration=integration).delete()
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert len(responses.calls) == 0
assert_no_webhook_payloads()
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
@responses.activate
def test_routing_webhook_properly_with_regions(self) -> None:
integration = self.get_integration()
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN=WEBHOOK_TOKEN,
HTTP_X_GITLAB_EVENT="Push Hook",
)
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_202_ACCEPTED
assert response.content == b""
assert len(responses.calls) == 0
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"gitlab:{integration.id}",
region_names=[region.name],
)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
@responses.activate
def test_routing_webhook_properly_with_multiple_orgs(self) -> None:
integration = self.get_integration()
other_org = self.create_organization(owner=self.user)
integration.add_organization(other_org)
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN=WEBHOOK_TOKEN,
HTTP_X_GITLAB_EVENT="Push Hook",
)
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 202
assert response.content == b""
assert len(responses.calls) == 0
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"gitlab:{integration.id}",
region_names=[region.name],
)
@override_regions(region_config)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@responses.activate
def test_routing_webhook_with_mailbox_buckets(self) -> None:
integration = self.get_integration()
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN=WEBHOOK_TOKEN,
HTTP_X_GITLAB_EVENT="Push Hook",
)
with mock.patch(
"sentry.integrations.middleware.hybrid_cloud.parser.ratelimiter.is_limited"
) as mock_is_limited:
mock_is_limited.return_value = True
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_202_ACCEPTED
assert response.content == b""
assert len(responses.calls) == 0
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"gitlab:{integration.id}:15",
region_names=[region.name],
)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
@responses.activate
def test_routing_search_properly(self) -> None:
self.get_integration()
path = reverse(
"sentry-extensions-gitlab-search",
kwargs={
"organization_id_or_slug": self.organization.slug,
"integration_id": self.integration.id,
},
)
request = self.factory.post(path, data={}, content_type="application/json")
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_200_OK
assert response.content == b"passthrough"
assert len(responses.calls) == 0
assert_no_webhook_payloads()
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_get_integration_from_request(self) -> None:
integration = self.get_integration()
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN=WEBHOOK_TOKEN,
HTTP_X_GITLAB_EVENT="Push Hook",
)
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
result = parser.get_integration_from_request()
assert result is not None
assert result.id == integration.id
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
@responses.activate
def test_webhook_outbox_creation(self) -> None:
request = self.factory.post(
self.path,
data=PUSH_EVENT,
content_type="application/json",
HTTP_X_GITLAB_TOKEN=WEBHOOK_TOKEN,
HTTP_X_GITLAB_EVENT="Push Hook",
)
integration = self.get_integration()
parser = GitlabRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_202_ACCEPTED
assert response.content == b""
assert len(responses.calls) == 0
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"gitlab:{integration.id}",
region_names=[region.name],
)
| GitlabRequestParserTest |
python | Textualize__textual | docs/examples/guide/compound/byte03.py | {
"start": 314,
"end": 1468
} | class ____(Widget):
"""A Switch with a numeric label above it."""
DEFAULT_CSS = """
BitSwitch {
layout: vertical;
width: auto;
height: auto;
}
BitSwitch > Label {
text-align: center;
width: 100%;
}
"""
class BitChanged(Message):
"""Sent when the 'bit' changes."""
def __init__(self, bit: int, value: bool) -> None:
super().__init__()
self.bit = bit
self.value = value
value = reactive(0)
def __init__(self, bit: int) -> None:
self.bit = bit
super().__init__()
def compose(self) -> ComposeResult:
yield Label(str(self.bit))
yield Switch()
def watch_value(self, value: bool) -> None: # (1)!
"""When the value changes we want to set the switch accordingly."""
self.query_one(Switch).value = value
def on_switch_changed(self, event: Switch.Changed) -> None:
"""When the switch changes, notify the parent via a message."""
event.stop()
self.value = event.value
self.post_message(self.BitChanged(self.bit, event.value))
| BitSwitch |
python | sphinx-doc__sphinx | sphinx/util/cfamily.py | {
"start": 4798,
"end": 5402
} | class ____(ASTBaseBase):
def __init__(self, name: str, args: ASTBaseParenExprList | None) -> None:
self.name = name
self.args = args
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTGnuAttribute):
return NotImplemented
return self.name == other.name and self.args == other.args
def __hash__(self) -> int:
return hash((self.name, self.args))
def _stringify(self, transform: StringifyTransform) -> str:
if self.args:
return self.name + transform(self.args)
return self.name
| ASTGnuAttribute |
python | django__django | tests/forms_tests/widget_tests/test_textinput.py | {
"start": 130,
"end": 4118
} | class ____(WidgetTest):
widget = TextInput()
def test_render(self):
self.check_html(
self.widget, "email", "", html='<input type="text" name="email">'
)
def test_render_none(self):
self.check_html(
self.widget, "email", None, html='<input type="text" name="email">'
)
def test_render_value(self):
self.check_html(
self.widget,
"email",
"test@example.com",
html=('<input type="text" name="email" value="test@example.com">'),
)
def test_render_boolean(self):
"""
Boolean values are rendered to their string forms ("True" and
"False").
"""
self.check_html(
self.widget,
"get_spam",
False,
html=('<input type="text" name="get_spam" value="False">'),
)
self.check_html(
self.widget,
"get_spam",
True,
html=('<input type="text" name="get_spam" value="True">'),
)
def test_render_quoted(self):
self.check_html(
self.widget,
"email",
'some "quoted" & ampersanded value',
html=(
'<input type="text" name="email" '
'value="some "quoted" & ampersanded value">'
),
)
def test_render_custom_attrs(self):
self.check_html(
self.widget,
"email",
"test@example.com",
attrs={"class": "fun"},
html=(
'<input type="text" name="email" value="test@example.com" class="fun">'
),
)
def test_render_unicode(self):
self.check_html(
self.widget,
"email",
"ŠĐĆŽćžšđ",
attrs={"class": "fun"},
html=(
'<input type="text" name="email" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" class="fun">'
),
)
def test_constructor_attrs(self):
widget = TextInput(attrs={"class": "fun", "type": "email"})
self.check_html(
widget, "email", "", html='<input type="email" class="fun" name="email">'
)
self.check_html(
widget,
"email",
"foo@example.com",
html=(
'<input type="email" class="fun" value="foo@example.com" name="email">'
),
)
def test_attrs_precedence(self):
"""
`attrs` passed to render() get precedence over those passed to the
constructor
"""
widget = TextInput(attrs={"class": "pretty"})
self.check_html(
widget,
"email",
"",
attrs={"class": "special"},
html='<input type="text" class="special" name="email">',
)
def test_attrs_safestring(self):
widget = TextInput(attrs={"onBlur": mark_safe("function('foo')")})
self.check_html(
widget,
"email",
"",
html='<input onBlur="function(\'foo\')" type="text" name="email">',
)
def test_use_required_attribute(self):
# Text inputs can safely trigger the browser validation.
self.assertIs(self.widget.use_required_attribute(None), True)
self.assertIs(self.widget.use_required_attribute(""), True)
self.assertIs(self.widget.use_required_attribute("resume.txt"), True)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = CharField(widget=self.widget)
form = TestForm()
self.assertIs(self.widget.use_fieldset, False)
self.assertHTMLEqual(
'<div><label for="id_field">Field:</label>'
'<input type="text" name="field" required id="id_field"></div>',
form.render(),
)
| TextInputTest |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 45926,
"end": 46445
} | class ____(TestCase):
@staticmethod
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
yield b""
yield b""
def test_err(self):
chunks = []
with self.makefile() as fd:
fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
read_http(fd, body='', chunks=chunks)
garbage = fd.read()
self.assertEqual(garbage, b"", "got garbage: %r" % garbage)
| TestEmptyYield |
python | huggingface__transformers | src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | {
"start": 2521,
"end": 4439
} | class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(
self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(position_ids)
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
| BlenderbotSmallLearnedPositionalEmbedding |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 82111,
"end": 82219
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING
| AutoModelForTextEncoding |
python | sqlalchemy__sqlalchemy | test/perf/compiled_extensions/row.py | {
"start": 116,
"end": 1687
} | class ____(Case):
NUMBER = 2_000_000
@staticmethod
def python():
from sqlalchemy.engine import _util_cy
py_util = load_uncompiled_module(_util_cy)
assert not py_util._is_compiled()
return py_util.tuplegetter
@staticmethod
def cython():
from sqlalchemy.engine import _util_cy
assert _util_cy._is_compiled()
return _util_cy.tuplegetter
IMPLEMENTATIONS = {
"python": python.__func__,
"cython": cython.__func__,
}
def init_objects(self):
self.impl_tg = self.impl
self.tuple = tuple(range(1000))
self.tg_inst = self.impl_tg(42)
self.tg_inst_m = self.impl_tg(42, 420, 99, 9, 1)
self.tg_inst_seq = self.impl_tg(*range(70, 75))
@classmethod
def update_results(cls, results):
cls._divide_results(results, "c", "python", "c / py")
cls._divide_results(results, "cython", "python", "cy / py")
cls._divide_results(results, "cython", "c", "cy / c")
@test_case
def tuplegetter_one(self):
self.tg_inst(self.tuple)
@test_case
def tuplegetter_many(self):
self.tg_inst_m(self.tuple)
@test_case
def tuplegetter_seq(self):
self.tg_inst_seq(self.tuple)
@test_case
def tuplegetter_new_one(self):
self.impl_tg(42)(self.tuple)
@test_case
def tuplegetter_new_many(self):
self.impl_tg(42, 420, 99, 9, 1)(self.tuple)
@test_case
def tuplegetter_new_seq(self):
self.impl_tg(40, 41, 42, 43, 44)(self.tuple)
| TupleGetter |
python | huggingface__transformers | src/transformers/data/processors/squad.py | {
"start": 28133,
"end": 28895
} | class ____:
"""
Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
Args:
unique_id: The unique identifier corresponding to that example.
start_logits: The logits corresponding to the start of the answer
end_logits: The logits corresponding to the end of the answer
"""
def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
self.start_logits = start_logits
self.end_logits = end_logits
self.unique_id = unique_id
if start_top_index:
self.start_top_index = start_top_index
self.end_top_index = end_top_index
self.cls_logits = cls_logits
| SquadResult |
python | apache__airflow | airflow-ctl/src/airflowctl/exceptions.py | {
"start": 1437,
"end": 1573
} | class ____(AirflowCtlException):
"""Raise when a connection error occurs while performing an operation."""
| AirflowCtlConnectionException |
python | huggingface__transformers | src/transformers/models/llama4/modeling_llama4.py | {
"start": 42610,
"end": 43935
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
idx = config.image_size // config.patch_size
img_idx = torch.arange(idx**2, dtype=torch.int32).reshape(idx**2, 1)
img_idx = torch.cat([img_idx, img_idx[:1]], dim=0)
img_idx[-1, -1] = -2 # ID_CLS_TOKEN
frequencies_x = img_idx % idx # get the coordinates of the 2d matrix along x
frequencies_y = img_idx // idx # get the coordinates of the 2d matrix along y
freq_dim = config.hidden_size // config.num_attention_heads // 2
rope_freq = 1.0 / (config.rope_theta ** (torch.arange(0, freq_dim, 2)[: (freq_dim // 2)].float() / freq_dim))
freqs_x = ((frequencies_x + 1)[..., None] * rope_freq[None, None, :]).repeat_interleave(2, dim=-1)
freqs_y = ((frequencies_y + 1)[..., None] * rope_freq[None, None, :]).repeat_interleave(2, dim=-1)
freqs = torch.cat([freqs_x, freqs_y], dim=-1).float().contiguous()[..., ::2]
freqs = freqs.masked_fill(img_idx.reshape(-1, 1, 1) < 0, 0)
freq_cis = torch.view_as_complex(torch.stack([torch.cos(freqs), torch.sin(freqs)], dim=-1))
self.freqs_ci = freq_cis # idx**2, idx**2, idx * 2
def forward(self, hidden_states):
return self.freqs_ci.to(hidden_states.device)
| Llama4VisionRotaryEmbedding |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 56049,
"end": 57210
} | class ____(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined with the subsequent line.
Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = "\\"
replace_char = " "
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append("".join(parts))
parts = []
return outlines
| ContinuationLinesInputter |
python | pytorch__pytorch | benchmarks/distributed/bench_nvshmem_tile_reduce.py | {
"start": 983,
"end": 6195
} | class ____(MultiProcContinuousTest):
def _init_device(self) -> None:
# TODO: relieve this (seems to hang if without)
device_module.set_device(self.device)
# Set NVSHMEM as SymmMem backend
symm_mem.set_backend("NVSHMEM")
@property
def device(self) -> torch.device:
return torch.device(device_type, self.rank)
def _benchmark_tile_reduce_single(
self,
full_size: int,
tile_size: int,
warmup_iters: int = 5,
bench_iters: int = 10,
) -> dict:
"""
Benchmark a single configuration of tile reduce.
Args:
full_size: Size of the full matrix (full_size x full_size)
warmup_iters: Number of warmup iterations
bench_iters: Number of benchmark iterations
Returns:
Dictionary with benchmark results
"""
self._init_device()
group_name = dist.group.WORLD.group_name
symm_mem.enable_symm_mem_for_group(group_name)
dtype = torch.float
# Allocate full matrices
full_inp = symm_mem.empty(
full_size, full_size, dtype=dtype, device=self.device
).fill_(self.rank)
full_out = symm_mem.empty(
full_size, full_size, dtype=dtype, device=self.device
).fill_(0)
slice_ut = slice(0, tile_size)
inp_tile = full_inp[slice_ut, slice_ut]
out_tile = full_out[slice_ut, slice_ut]
root = 0
# Warmup iterations
for _ in range(warmup_iters):
torch.ops.symm_mem.tile_reduce(inp_tile, out_tile, root, group_name)
torch.cuda.synchronize(self.device)
# Benchmark iterations
times = []
dist.barrier()
torch.cuda.synchronize(self.device)
start_time = time.perf_counter()
for _ in range(bench_iters):
torch.ops.symm_mem.tile_reduce(inp_tile, out_tile, root, group_name)
torch.cuda.synchronize(self.device)
end_time = time.perf_counter()
times.append((end_time - start_time) / bench_iters)
# Calculate statistics
times = torch.tensor(times, dtype=torch.float64)
tile_elements = tile_size * tile_size
tile_bytes = (
tile_elements * dtype.itemsize
if hasattr(dtype, "itemsize")
else tile_elements * 4
)
results = {
"full_size": full_size,
"tile_size": tile_size,
"tile_elements": tile_elements,
"tile_bytes": tile_bytes,
"world_size": self.world_size,
"mean_time_ms": times.mean().item() * 1000,
"std_time_ms": times.std().item() * 1000,
"min_time_ms": times.min().item() * 1000,
"max_time_ms": times.max().item() * 1000,
"throughput_gb_s": tile_bytes / (times.mean().item() * 1e9),
"elements_per_sec": tile_elements / times.mean().item(),
}
return results
@skipIfRocm
def test_benchmark_tile_reduce_various_sizes(self) -> None:
"""
Benchmark tile reduce across various matrix sizes.
"""
# Test various matrix sizes
tile_sizes = [512, 1024, 2048, 4096, 8192, 16384]
full_size = tile_sizes[-1]
warmup_iters = 5
bench_iters = 20
results = []
for tile_size in tile_sizes:
try:
result = self._benchmark_tile_reduce_single(
full_size, tile_size, warmup_iters, bench_iters
)
results.append(result)
if self.rank == 0:
print(
f"Matrix Size: {full_size}x{full_size}, Tile Size: {tile_size}x{tile_size}"
)
print(
f" Mean Time: {result['mean_time_ms']:.3f} ± {result['std_time_ms']:.3f} ms"
)
print(f" Throughput: {result['throughput_gb_s']:.2f} GB/s")
print(f" Bytes: {result['tile_bytes']:.0f}")
print()
except Exception as e:
if self.rank == 0:
print(f"Failed to benchmark matrix size {full_size}: {e}")
# Print summary
if self.rank == 0 and results:
print("=== BENCHMARK SUMMARY ===")
print(
f"{'Matrix Size':<12} {'Tile Size':<10} {'Time (ms)':<12} {'Throughput (GB/s)':<18} {'Bytes':<15}"
)
print("-" * 70)
for result in results:
print(
f"{result['full_size']}x{result['full_size']:<7} "
f"{result['tile_size']}x{result['tile_size']:<5} "
f"{result['mean_time_ms']:<12.3f} "
f"{result['throughput_gb_s']:<18.2f} "
f"{result['tile_bytes']:<15.0f}"
)
if __name__ == "__main__":
# For standalone usage, you'd need to set up distributed environment
# For now, this is meant to be run via the PyTorch test framework
from torch.testing._internal.common_utils import run_tests
run_tests()
| NVSHMEMTileReduceBenchmark |
python | numba__numba | numba/tests/test_linalg.py | {
"start": 49719,
"end": 59972
} | class ____(TestLinalgSystems):
"""
Tests for np.linalg.lstsq.
"""
# NOTE: The testing of this routine is hard as it has to handle numpy
# using double precision routines on single precision input, this has
# a knock on effect especially in rank deficient cases and cases where
# conditioning is generally poor. As a result computed ranks can differ
# and consequently the calculated residual can differ.
# The tests try and deal with this as best as they can through the use
# of reconstruction and measures like residual norms.
# Suggestions for improvements are welcomed!
@needs_lapack
def test_linalg_lstsq(self):
"""
Test np.linalg.lstsq
"""
cfunc = jit(nopython=True)(lstsq_system)
def check(A, B, **kwargs):
expected = lstsq_system(A, B, **kwargs)
got = cfunc(A, B, **kwargs)
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that length is 4
self.assertEqual(len(got), 4)
# and that the computed results are contig and in the same way
self.assert_contig_sanity(got, "C")
use_reconstruction = False
# check the ranks are the same and continue to a standard
# match if that is the case (if ranks differ, then output
# in e.g. residual array is of different size!).
try:
self.assertEqual(got[2], expected[2])
# try plain match of each array to np first
for k in range(len(expected)):
try:
np.testing.assert_array_almost_equal_nulp(
got[k], expected[k], nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
except AssertionError:
use_reconstruction = True
if use_reconstruction:
x, res, rank, s = got
# indicies in the output which are ndarrays
out_array_idx = [0, 1, 3]
try:
# check the ranks are the same
self.assertEqual(rank, expected[2])
# check they are dimensionally correct, skip [2] = rank.
for k in out_array_idx:
if isinstance(expected[k], np.ndarray):
self.assertEqual(got[k].shape, expected[k].shape)
except AssertionError:
# check the rank differs by 1. (numerical fuzz)
self.assertTrue(abs(rank - expected[2]) < 2)
# check if A*X = B
resolution = np.finfo(A.dtype).resolution
try:
# this will work so long as the conditioning is
# ok and the rank is full
rec = np.dot(A, x)
np.testing.assert_allclose(
B,
rec,
rtol=10 * resolution,
atol=10 * resolution
)
except AssertionError:
# system is probably under/over determined and/or
# poorly conditioned. Check slackened equality
# and that the residual norm is the same.
for k in out_array_idx:
try:
np.testing.assert_allclose(
expected[k],
got[k],
rtol=100 * resolution,
atol=100 * resolution
)
except AssertionError:
# check the fail is likely due to bad conditioning
c = np.linalg.cond(A)
self.assertGreater(10 * c, (1. / resolution))
# make sure the residual 2-norm is ok
# if this fails its probably due to numpy using double
# precision LAPACK routines for singles.
res_expected = np.linalg.norm(
B - np.dot(A, expected[0]))
res_got = np.linalg.norm(B - np.dot(A, x))
# rtol = 10. as all the systems are products of orthonormals
# and on the small side (rows, cols) < 100.
np.testing.assert_allclose(
res_expected, res_got, rtol=10.)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(A, B, **kwargs)
# test: column vector, tall, wide, square, row vector
# prime sizes, the A's
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# compatible B's for Ax=B must have same number of rows and 1 or more
# columns
# This test takes ages! So combinations are trimmed via cycling
# gets a dtype
cycle_dt = cycle(self.dtypes)
orders = ['F', 'C']
# gets a memory order flag
cycle_order = cycle(orders)
# a specific condition number to use in the following tests
# there is nothing special about it other than it is not magic
specific_cond = 10.
# inner test loop, extracted as there's additional logic etc required
# that'd end up with this being repeated a lot
def inner_test_loop_fn(A, dt, **kwargs):
# test solve Ax=B for (column, matrix) B, same dtype as A
b_sizes = (1, 13)
for b_size in b_sizes:
# check 2D B
b_order = next(cycle_order)
B = self.specific_sample_matrix(
(A.shape[0], b_size), dt, b_order)
check(A, B, **kwargs)
# check 1D B
b_order = next(cycle_order)
tmp = B[:, 0].copy(order=b_order)
check(A, tmp, **kwargs)
# test loop
for a_size in sizes:
dt = next(cycle_dt)
a_order = next(cycle_order)
# A full rank, well conditioned system
A = self.specific_sample_matrix(a_size, dt, a_order)
# run the test loop
inner_test_loop_fn(A, dt)
m, n = a_size
minmn = min(m, n)
# operations that only make sense with a 2D matrix system
if m != 1 and n != 1:
# Test a rank deficient system
r = minmn - 1
A = self.specific_sample_matrix(
a_size, dt, a_order, rank=r)
# run the test loop
inner_test_loop_fn(A, dt)
# Test a system with a given condition number for use in
# testing the rcond parameter.
# This works because the singular values in the
# specific_sample_matrix code are linspace (1, cond, [0... if
# rank deficient])
A = self.specific_sample_matrix(
a_size, dt, a_order, condition=specific_cond)
# run the test loop
rcond = 1. / specific_cond
approx_half_rank_rcond = minmn * rcond
inner_test_loop_fn(A, dt,
rcond=approx_half_rank_rcond)
# check empty arrays
empties = [
[(0, 1), (1,)], # empty A, valid b
[(1, 0), (1,)], # empty A, valid b
[(1, 1), (0,)], # valid A, empty 1D b
[(1, 1), (1, 0)], # valid A, empty 2D b
]
for A, b in empties:
args = (np.empty(A), np.empty(b))
self.assert_raise_on_empty(cfunc, args)
# Test input validation
ok = np.array([[1., 2.], [3., 4.]], dtype=np.float64)
# check ok input is ok
cfunc, (ok, ok)
# check bad inputs
rn = "lstsq"
# Wrong dtype
bad = np.array([[1, 2], [3, 4]], dtype=np.int32)
self.assert_wrong_dtype(rn, cfunc, (ok, bad))
self.assert_wrong_dtype(rn, cfunc, (bad, ok))
# different dtypes
bad = np.array([[1, 2], [3, 4]], dtype=np.float32)
self.assert_homogeneous_dtypes(rn, cfunc, (ok, bad))
self.assert_homogeneous_dtypes(rn, cfunc, (bad, ok))
# Dimension issue
bad = np.array([1, 2], dtype=np.float64)
self.assert_wrong_dimensions(rn, cfunc, (bad, ok))
# no nans or infs
bad = np.array([[1., 2., ], [np.inf, np.nan]], dtype=np.float64)
self.assert_no_nan_or_inf(cfunc, (ok, bad))
self.assert_no_nan_or_inf(cfunc, (bad, ok))
# check 1D is accepted for B (2D is done previously)
# and then that anything of higher dimension raises
oneD = np.array([1., 2.], dtype=np.float64)
cfunc, (ok, oneD)
bad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float64)
self.assert_wrong_dimensions_1D(rn, cfunc, (ok, bad))
# check a dimensionally invalid system raises (1D and 2D cases
# checked)
bad1D = np.array([1.], dtype=np.float64)
bad2D = np.array([[1.], [2.], [3.]], dtype=np.float64)
self.assert_dimensionally_invalid(cfunc, (ok, bad1D))
self.assert_dimensionally_invalid(cfunc, (ok, bad2D))
@needs_lapack
def test_issue3368(self):
X = np.array([[1., 7.54, 6.52],
[1., 2.70, 4.00],
[1., 2.50, 3.80],
[1., 1.15, 5.64],
[1., 4.22, 3.27],
[1., 1.41, 5.70],], order='F')
X_orig = np.copy(X)
y = np.array([1., 2., 3., 4., 5., 6.])
@jit(nopython=True)
def f2(X, y, test):
if test:
# never executed, but necessary to trigger the bug
X = X[1:2, :]
return np.linalg.lstsq(X, y)
f2(X, y, False)
np.testing.assert_allclose(X, X_orig)
| TestLinalgLstsq |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/tex.py | {
"start": 10286,
"end": 10398
} | class ____(tex):
texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False)
| xelatex |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_setitem.py | {
"start": 47871,
"end": 48399
} | class ____(CoercionTest):
# previously test_setitem_series_datetime64tz in tests.indexing.test_coercion
@pytest.fixture
def obj(self):
tz = "US/Eastern"
return Series(date_range("2011-01-01", freq="D", periods=4, tz=tz, unit="ns"))
@pytest.fixture
def raises(self):
return False
@pytest.mark.parametrize(
"val,exp_dtype,raises",
[
(Timedelta("12 day"), "timedelta64[ns]", False),
(1, object, True),
("x", object, True),
],
)
| TestCoercionDatetime64TZ |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 98369,
"end": 98637
} | class ____:
xlProtectedViewWindowMaximized = 2 # from enum XlProtectedViewWindowState
xlProtectedViewWindowMinimized = 1 # from enum XlProtectedViewWindowState
xlProtectedViewWindowNormal = 0 # from enum XlProtectedViewWindowState
| ProtectedViewWindowState |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 29517,
"end": 29862
} | class ____(sgqlc.types.Enum):
"""The possible state reasons of a closed issue.
Enumeration Choices:
* `COMPLETED`: An issue that has been closed as completed
* `NOT_PLANNED`: An issue that has been closed as not planned
"""
__schema__ = github_schema
__choices__ = ("COMPLETED", "NOT_PLANNED")
| IssueClosedStateReason |
python | tqdm__tqdm | tqdm/rich.py | {
"start": 2328,
"end": 5021
} | class ____(std_tqdm): # pragma: no cover
"""Experimental rich.progress GUI version of tqdm!"""
# TODO: @classmethod: write()?
def __init__(self, *args, **kwargs):
"""
This class accepts the following parameters *in addition* to
the parameters accepted by `tqdm`.
Parameters
----------
progress : tuple, optional
arguments for `rich.progress.Progress()`.
options : dict, optional
keyword arguments for `rich.progress.Progress()`.
"""
kwargs = kwargs.copy()
kwargs['gui'] = True
# convert disable = None to False
kwargs['disable'] = bool(kwargs.get('disable', False))
progress = kwargs.pop('progress', None)
options = kwargs.pop('options', {}).copy()
super().__init__(*args, **kwargs)
if self.disable:
return
warn("rich is experimental/alpha", TqdmExperimentalWarning, stacklevel=2)
d = self.format_dict
if progress is None:
progress = (
"[progress.description]{task.description}"
"[progress.percentage]{task.percentage:>4.0f}%",
BarColumn(bar_width=None),
FractionColumn(
unit_scale=d['unit_scale'], unit_divisor=d['unit_divisor']),
"[", TimeElapsedColumn(), "<", TimeRemainingColumn(),
",", RateColumn(unit=d['unit'], unit_scale=d['unit_scale'],
unit_divisor=d['unit_divisor']), "]"
)
options.setdefault('transient', not self.leave)
self._prog = Progress(*progress, **options)
self._prog.__enter__()
self._task_id = self._prog.add_task(self.desc or "", **d)
def close(self):
if self.disable:
return
self.display() # print 100%, vis #1306
super().close()
self._prog.__exit__(None, None, None)
def clear(self, *_, **__):
pass
def display(self, *_, **__):
if not hasattr(self, '_prog'):
return
self._prog.update(self._task_id, completed=self.n, description=self.desc)
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
if hasattr(self, '_prog'):
self._prog.reset(total=total)
super().reset(total=total)
def trrange(*args, **kwargs):
"""Shortcut for `tqdm.rich.tqdm(range(*args), **kwargs)`."""
return tqdm_rich(range(*args), **kwargs)
# Aliases
tqdm = tqdm_rich
trange = trrange
| tqdm_rich |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-incremovable-subarrays-i.py | {
"start": 44,
"end": 681
} | class ____(object):
def incremovableSubarrayCount(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
for j in reversed(xrange(1, len(nums))):
if not nums[j-1] < nums[j]:
break
else:
return (len(nums)+1)*len(nums)//2
result = len(nums)-j+1
for i in xrange(len(nums)-1):
while j < len(nums) and not (nums[i] < nums[j]):
j += 1
result += len(nums)-j+1
if not (nums[i] < nums[i+1]):
break
return result
# Time: O(n^3)
# Space: O(1)
# brute force
| Solution |
python | tqdm__tqdm | tqdm/std.py | {
"start": 7811,
"end": 57461
} | class ____(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` and `nrows` to the
environment (allowing for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s, eta.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
Whether to write bytes. If (default: False) will write unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
nrows : int, optional
The screen height. If specified, hides nested bars outside this
bound. If unspecified, attempts to use environment height.
The fallback is 20.
colour : str, optional
Bar colour (e.g. 'green', '#00ff00').
delay : float, optional
Don't display until [default: 0] seconds have elapsed.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_instances = WeakSet()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return f'{num:1.2f}{unit}{suffix}'
return f'{num:2.1f}{unit}{suffix}'
return f'{num:3.0f}{unit}{suffix}'
num /= divisor
return f'{num:3.1f}Y{suffix}'
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
return f'{h:d}:{m:02d}:{s:02d}' if h else f'{m:02d}:{s:02d}'
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = f'{n:.3g}'.replace('e+0', 'e+').replace('e-0', 'e-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
if fp in (sys.stderr, sys.stdout):
getattr(sys.stderr, 'flush', lambda: None)()
getattr(sys.stdout, 'flush', lambda: None)()
def fp_write(s):
fp.write(str(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = disp_len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',
unit_scale=False, rate=None, bar_format=None, postfix=None,
unit_divisor=1000, initial=0, colour=None, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s, eta.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
initial : int or float, optional
The initial counter value [default: 0].
colour : str, optional
Bar colour (e.g. 'green', '#00ff00').
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = (n - initial) / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else f'{rate:5.2f}')
if rate else '?') + unit + '/s'
rate_inv_fmt = (
(format_sizeof(inv_rate) if unit_scale else f'{inv_rate:5.2f}')
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
try:
eta_dt = (datetime.now() + timedelta(seconds=remaining)
if rate and total else datetime.fromtimestamp(0, timezone.utc))
except OverflowError:
eta_dt = datetime.max
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = f'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = {
# slight extension of self.format_dict
'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,
'elapsed': elapsed_str, 'elapsed_s': elapsed,
'ncols': ncols, 'desc': prefix or '', 'unit': unit,
'rate': inv_rate if inv_rate and inv_rate > 1 else rate,
'rate_fmt': rate_fmt, 'rate_noinv': rate,
'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,
'rate_inv_fmt': rate_inv_fmt,
'postfix': postfix, 'unit_divisor': unit_divisor,
'colour': colour,
# plus more useful definitions
'remaining': remaining_str, 'remaining_s': remaining,
'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,
**extra_kwargs}
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += f'{percentage:3.0f}%|'
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `{desc}`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar # no `{bar}`; nothing else to do
# Formatting progress bar space available for bar's display
full_bar = Bar(frac,
max(1, ncols - disp_len(nobar)) if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
colour=colour)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = str(bar_format)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(0,
max(1, ncols - disp_len(nobar)) if ncols else 10,
charset=Bar.BLANK, colour=colour)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
else:
# no total: no progressbar, ETA, just progress stats
return (f'{(prefix + ": ") if prefix else ""}'
f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')
def __new__(cls, *_, **__):
instance = object.__new__(cls)
with cls.get_lock(): # also constructs lock if non-existent
cls._instances.add(instance)
# create monitoring thread
if cls.monitor_interval and (cls.monitor is None
or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = {abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos")}
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition another unfixed bar
to fill the new gap.
This means that by default (where all nested bars are unfixed),
order is not maintained but screen flicker/blank space is minimised.
(tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
last = (instance.nrows or 20) - 1
# find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)
instances = list(filter(
lambda i: hasattr(i, "pos") and last <= i.pos,
cls._instances))
# set first found to current `pos`
if instances:
inst = min(instances, key=lambda i: i.pos)
inst.clear(nolock=True)
inst.pos = abs(instance.pos)
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
try:
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
finally:
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(cls, **tqdm_kwargs):
"""
Registers the current `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be created every time `progress_apply` is called,
and each instance will automatically `close()` upon completion.
Parameters
----------
tqdm_kwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
<https://stackoverflow.com/questions/18603270/\
progress-indicator-during-pandas-operations-python>
"""
from warnings import catch_warnings, simplefilter
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
with catch_warnings():
simplefilter("ignore", category=FutureWarning)
from pandas import Panel
except ImportError: # pandas>=1.2.0
Panel = None
Rolling, Expanding = None, None
try: # pandas>=1.0.0
from pandas.core.window.rolling import _Rolling_and_Expanding
except ImportError:
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pandas>=1.2.0
try: # pandas>=1.2.0
from pandas.core.window.expanding import Expanding
from pandas.core.window.rolling import Rolling
_Rolling_and_Expanding = Rolling, Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy
from pandas.core.groupby.generic import DataFrameGroupBy
except ImportError: # pragma: no cover
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError: # pragma: no cover
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
tqdm_kwargs = tqdm_kwargs.copy()
deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif (_Rolling_and_Expanding is None or
not isinstance(df, _Rolling_and_Expanding)):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = cls(total=total, **tqdm_kwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try: # pandas>=1.3.0
from pandas.core.common import is_builtin_func
except ImportError:
is_builtin_func = df._is_builtin_func
try:
func = is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
try:
return getattr(df, df_function)(wrapper, **kwargs)
finally:
t.close()
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
DataFrame.progress_map = inner_generator('map')
DataFrameGroupBy.progress_map = inner_generator('map')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if Rolling is not None and Expanding is not None:
Rolling.progress_apply = inner_generator()
Expanding.progress_apply = inner_generator()
elif _Rolling_and_Expanding is not None:
_Rolling_and_Expanding.progress_apply = inner_generator()
# override defaults via env vars
@envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,
'position': int, 'nrows': int})
def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,
ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,
ascii=None, disable=False, unit='it', unit_scale=False,
dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,
position=None, postfix=None, unit_divisor=1000, write_bytes=False,
lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,
**kwargs):
"""see tqdm.tqdm for arguments"""
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
file = DisableOnWriteError(file, tqdm_instance=self)
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
self.leave = leave
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if (
(ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))
) or dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _screen_shape_wrapper()
if dynamic_ncols:
ncols, nrows = dynamic_ncols(file)
else:
_dynamic_ncols = _screen_shape_wrapper()
if _dynamic_ncols:
_ncols, _nrows = _dynamic_ncols(file)
if ncols is None:
ncols = _ncols
if nrows is None:
nrows = _nrows
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and ascii is not True and not _is_ascii(ascii):
# Convert bar format into unicode since terminal uses unicode
bar_format = str(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.nrows = nrows
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.initial = initial
self.lock_args = lock_args
self.delay = delay
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self._ema_dn = EMA(smoothing)
self._ema_dt = EMA(smoothing)
self._ema_miniters = EMA(smoothing)
self.bar_format = bar_format
self.postfix = None
self.colour = colour
self._time = time
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
# mark fixed positions as negative
self.pos = self._get_free_pos(self) if position is None else -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if delay <= 0:
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __len__(self):
return (
self.total if self.iterable is None
else self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else self.iterable.__length_hint__() if hasattr(self.iterable, "__length_hint__")
else getattr(self, "total", None))
def __reversed__(self):
try:
orig = self.iterable
except AttributeError:
raise TypeError("'tqdm' object is not reversible")
else:
self.iterable = reversed(self.iterable)
return self.__iter__()
finally:
self.iterable = orig
def __contains__(self, item):
contains = getattr(self.iterable, '__contains__', None)
return contains(item) if contains is not None else item in self.__iter__()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __str__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
last_print_t = self.last_print_t
last_print_n = self.last_print_n
min_start_t = self.start_t + self.delay
n = self.n
time = self._time
try:
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
if n - last_print_n >= self.miniters:
cur_t = time()
dt = cur_t - last_print_t
if dt >= mininterval and cur_t >= min_start_t:
self.update(n - last_print_n)
last_print_n = self.last_print_n
last_print_t = self.last_print_t
finally:
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
Returns
-------
out : bool or None
True if a `display()` was triggered.
"""
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
cur_t = self._time()
dt = cur_t - self.last_print_t
if dt >= self.mininterval and cur_t >= self.start_t + self.delay:
cur_t = self._time()
dn = self.n - self.last_print_n # >= n
if self.smoothing and dt and dn:
# EMA (not just overall average)
self._ema_dn(dn)
self._ema_dt(dt)
self.refresh(lock_args=self.lock_args)
if self.dynamic_miniters:
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.maxinterval and dt >= self.maxinterval:
self.miniters = dn * (self.mininterval or self.maxinterval) / dt
elif self.smoothing:
# EMA miniters update
self.miniters = self._ema_miniters(
dn * (self.mininterval / dt if self.mininterval and dt
else 1))
else:
# max iters between two prints
self.miniters = max(self.miniters, dn)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
return True
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
if self.last_print_t < self.start_t + self.delay:
# haven't ever displayed; nothing to clear
return
# GUI mode
if getattr(self, 'sp', None) is None:
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(str(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self._ema_dt = lambda: None
self.display(pos=0)
fp_write('\n')
else:
# clear previous display
if self.display(msg='', pos=pos) and not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
pos = abs(self.pos)
if pos < (self.nrows or 20):
self.moveto(pos)
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-pos)
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
if self.disable:
return
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.n = 0
if total is not None:
self.total = total
if self.disable:
return
self.last_print_n = 0
self.last_print_t = self.start_t = self._time()
self._ema_dn = EMA(self.smoothing)
self._ema_dt = EMA(self.smoothing)
self._ema_miniters = EMA(self.smoothing)
self.refresh()
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], str):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write('\n' * n + _term_move_up() * -n)
getattr(self.fp, 'flush', lambda: None)()
@property
def format_dict(self):
"""Public API for read-only member access."""
if self.disable and not hasattr(self, 'unit'):
return defaultdict(lambda: None, {
'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})
if self.dynamic_ncols:
self.ncols, self.nrows = self.dynamic_ncols(self.fp)
return {
'n': self.n, 'total': self.total,
'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,
'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,
'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,
'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,
'bar_format': self.bar_format, 'postfix': self.postfix,
'unit_divisor': self.unit_divisor, 'initial': self.initial,
'colour': self.colour}
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
nrows = self.nrows or 20
if pos >= nrows - 1:
if pos >= nrows:
return False
if msg or msg is None: # override at `nrows - 1`
msg = " ... (more hidden) ..."
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
if pos:
self.moveto(pos)
self.sp(self.__str__() if msg is None else msg)
if pos:
self.moveto(-pos)
return True
@classmethod
@contextmanager
def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with cls(total=total, **tqdm_kwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
def trange(*args, **kwargs):
"""Shortcut for tqdm(range(*args), **kwargs)."""
return tqdm(range(*args), **kwargs)
| tqdm |
python | pypa__hatch | backend/src/hatchling/builders/binary.py | {
"start": 275,
"end": 3130
} | class ____(BuilderConfig):
SUPPORTED_VERSIONS = ("3.12", "3.11", "3.10", "3.9", "3.8", "3.7")
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.__scripts: list[str] | None = None
self.__python_version: str | None = None
self.__pyapp_version: str | None = None
@property
def scripts(self) -> list[str]:
if self.__scripts is None:
known_scripts = self.builder.metadata.core.scripts
scripts = self.target_config.get("scripts", [])
if not isinstance(scripts, list):
message = f"Field `tool.hatch.build.targets.{self.plugin_name}.scripts` must be an array"
raise TypeError(message)
for i, script in enumerate(scripts, 1):
if not isinstance(script, str):
message = (
f"Script #{i} of field `tool.hatch.build.targets.{self.plugin_name}.scripts` must be a string"
)
raise TypeError(message)
if script not in known_scripts:
message = f"Unknown script in field `tool.hatch.build.targets.{self.plugin_name}.scripts`: {script}"
raise ValueError(message)
self.__scripts = sorted(set(scripts)) if scripts else list(known_scripts)
return self.__scripts
@property
def python_version(self) -> str:
if self.__python_version is None:
python_version = self.target_config.get("python-version", "")
if not isinstance(python_version, str):
message = f"Field `tool.hatch.build.targets.{self.plugin_name}.python-version` must be a string"
raise TypeError(message)
if not python_version and "PYAPP_DISTRIBUTION_SOURCE" not in os.environ:
for supported_version in self.SUPPORTED_VERSIONS:
if self.builder.metadata.core.python_constraint.contains(supported_version):
python_version = supported_version
break
else:
message = "Field `project.requires-python` is incompatible with the known distributions"
raise ValueError(message)
self.__python_version = python_version
return self.__python_version
@property
def pyapp_version(self) -> str:
if self.__pyapp_version is None:
pyapp_version = self.target_config.get("pyapp-version", "")
if not isinstance(pyapp_version, str):
message = f"Field `tool.hatch.build.targets.{self.plugin_name}.pyapp-version` must be a string"
raise TypeError(message)
self.__pyapp_version = pyapp_version
return self.__pyapp_version
| BinaryBuilderConfig |
python | crytic__slither | slither/visitors/expression/write_var.py | {
"start": 1474,
"end": 5914
} | class ____(ExpressionVisitor):
def __init__(self, expression: Expression) -> None:
self._result: Optional[List[Expression]] = None
super().__init__(expression)
def result(self) -> List[Any]:
if self._result is None:
self._result = list(set(get(self.expression)))
return self._result
def _post_binary_operation(self, expression: BinaryOperation) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
def _post_call_expression(self, expression: CallExpression) -> None:
called = get(expression.called)
args = [get(a) for a in expression.arguments if a]
args = [item for sublist in args for item in sublist]
val = called + args
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
def _post_conditional_expression(self, expression: ConditionalExpression) -> None:
if_expr = get(expression.if_expression)
else_expr = get(expression.else_expression)
then_expr = get(expression.then_expression)
val = if_expr + else_expr + then_expr
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
def _post_assignement_operation(self, expression: AssignmentOperation) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
def _post_elementary_type_name_expression(
self, expression: ElementaryTypeNameExpression
) -> None:
set_val(expression, [])
# save only identifier expression
def _post_identifier(self, expression: Identifier) -> None:
if expression.is_lvalue:
set_val(expression, [expression])
else:
set_val(expression, [])
# if isinstance(expression.value, Variable):
# set_val(expression, [expression.value])
# else:
# set_val(expression, [])
def _post_index_access(self, expression: IndexAccess) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
if expression.is_lvalue:
# val += [expression]
val += [expression.expression_left]
# n = expression.expression_left
# parse all the a.b[..].c[..]
# while isinstance(n, (IndexAccess, MemberAccess)):
# if isinstance(n, IndexAccess):
# val += [n.expression_left]
# n = n.expression_left
# else:
# val += [n.expression]
# n = n.expression
set_val(expression, val)
def _post_literal(self, expression: Literal) -> None:
set_val(expression, [])
def _post_member_access(self, expression: MemberAccess) -> None:
expr = get(expression.expression)
val = expr
if expression.is_lvalue:
val += [expression]
val += [expression.expression]
set_val(expression, val)
def _post_new_array(self, expression: NewArray) -> None:
set_val(expression, [])
def _post_new_contract(self, expression: NewContract) -> None:
set_val(expression, [])
def _post_new_elementary_type(self, expression: NewElementaryType) -> None:
set_val(expression, [])
def _post_tuple_expression(self, expression: TupleExpression) -> None:
expressions = [get(e) for e in expression.expressions if e]
val = [item for sublist in expressions for item in sublist]
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
def _post_type_conversion(self, expression: TypeConversion) -> None:
expr = get(expression.expression)
val = expr
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
def _post_unary_operation(self, expression: UnaryOperation) -> None:
expr = get(expression.expression)
val = expr
if expression.is_lvalue:
val += [expression]
set_val(expression, val)
| WriteVar |
python | getsentry__sentry | src/sentry/api/endpoints/project_performance_general_settings.py | {
"start": 595,
"end": 755
} | class ____(serializers.Serializer):
enable_images = serializers.BooleanField(required=False)
@region_silo_endpoint
| ProjectPerformanceGeneralSettingsSerializer |
python | django__django | django/template/loaders/filesystem.py | {
"start": 257,
"end": 1506
} | class ____(BaseLoader):
def __init__(self, engine, dirs=None):
super().__init__(engine)
self.dirs = dirs
def get_dirs(self):
return self.dirs if self.dirs is not None else self.engine.dirs
def get_contents(self, origin):
try:
with open(origin.name, encoding=self.engine.file_charset) as fp:
return fp.read()
except FileNotFoundError:
raise TemplateDoesNotExist(origin)
def get_template_sources(self, template_name):
"""
Return an Origin object pointing to an absolute path in each directory
in template_dirs. For security reasons, if a path doesn't lie inside
one of the template_dirs it is excluded from the result set.
"""
for template_dir in self.get_dirs():
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
continue
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
| Loader |
python | paramiko__paramiko | paramiko/pkey.py | {
"start": 3106,
"end": 33863
} | class ____:
"""
Base class for public keys.
Also includes some "meta" level convenience constructors such as
`.from_type_string`.
"""
# known encryption types for private key files:
_CIPHER_TABLE = {
"AES-128-CBC": {
"cipher": algorithms.AES,
"keysize": 16,
"blocksize": 16,
"mode": modes.CBC,
},
"AES-256-CBC": {
"cipher": algorithms.AES,
"keysize": 32,
"blocksize": 16,
"mode": modes.CBC,
},
"DES-EDE3-CBC": {
"cipher": TripleDES,
"keysize": 24,
"blocksize": 8,
"mode": modes.CBC,
},
}
_PRIVATE_KEY_FORMAT_ORIGINAL = 1
_PRIVATE_KEY_FORMAT_OPENSSH = 2
BEGIN_TAG = re.compile(r"^-{5}BEGIN (RSA|EC|OPENSSH) PRIVATE KEY-{5}\s*$")
END_TAG = re.compile(r"^-{5}END (RSA|EC|OPENSSH) PRIVATE KEY-{5}\s*$")
@staticmethod
def from_path(path, passphrase=None):
"""
Attempt to instantiate appropriate key subclass from given file path.
:param Path path: The path to load (may also be a `str`).
:returns:
A `PKey` subclass instance.
:raises:
`UnknownKeyType`, if our crypto backend doesn't know this key type.
.. versionadded:: 3.2
"""
# TODO: make sure sphinx is reading Path right in param list...
# Lazy import to avoid circular import issues
from paramiko import RSAKey, Ed25519Key, ECDSAKey
# Normalize to string, as cert suffix isn't quite an extension, so
# pathlib isn't useful for this.
path = str(path)
# Sort out cert vs key, i.e. it is 'legal' to hand this kind of API
# /either/ the key /or/ the cert, when there is a key/cert pair.
cert_suffix = "-cert.pub"
if str(path).endswith(cert_suffix):
key_path = path[: -len(cert_suffix)]
cert_path = path
else:
key_path = path
cert_path = path + cert_suffix
key_path = Path(key_path).expanduser()
cert_path = Path(cert_path).expanduser()
data = key_path.read_bytes()
# Like OpenSSH, try modern/OpenSSH-specific key load first
try:
loaded = serialization.load_ssh_private_key(
data=data, password=passphrase
)
# Then fall back to assuming legacy PEM type
except ValueError:
loaded = serialization.load_pem_private_key(
data=data, password=passphrase
)
# TODO Python 3.10: match statement? (NOTE: we cannot use a dict
# because the results from the loader are literal backend, eg openssl,
# private classes, so isinstance tests work but exact 'x class is y'
# tests will not work)
# TODO: leverage already-parsed/math'd obj to avoid duplicate cpu
# cycles? seemingly requires most of our key subclasses to be rewritten
# to be cryptography-object-forward. this is still likely faster than
# the old SSHClient code that just tried instantiating every class!
key_class = None
if isinstance(loaded, asymmetric.rsa.RSAPrivateKey):
key_class = RSAKey
elif isinstance(loaded, asymmetric.ed25519.Ed25519PrivateKey):
key_class = Ed25519Key
elif isinstance(loaded, asymmetric.ec.EllipticCurvePrivateKey):
key_class = ECDSAKey
else:
raise UnknownKeyType(key_bytes=data, key_type=loaded.__class__)
with key_path.open() as fd:
key = key_class.from_private_key(fd, password=passphrase)
if cert_path.exists():
# load_certificate can take Message, path-str, or value-str
key.load_certificate(str(cert_path))
return key
@staticmethod
def from_type_string(key_type, key_bytes):
"""
Given type `str` & raw `bytes`, return a `PKey` subclass instance.
For example, ``PKey.from_type_string("ssh-ed25519", <public bytes>)``
will (if successful) return a new `.Ed25519Key`.
:param str key_type:
The key type, eg ``"ssh-ed25519"``.
:param bytes key_bytes:
The raw byte data forming the key material, as expected by
subclasses' ``data`` parameter.
:returns:
A `PKey` subclass instance.
:raises:
`UnknownKeyType`, if no registered classes knew about this type.
.. versionadded:: 3.2
"""
from paramiko import key_classes
for key_class in key_classes:
if key_type in key_class.identifiers():
# TODO: needs to passthru things like passphrase
return key_class(data=key_bytes)
raise UnknownKeyType(key_type=key_type, key_bytes=key_bytes)
@classmethod
def identifiers(cls):
"""
returns an iterable of key format/name strings this class can handle.
Most classes only have a single identifier, and thus this default
implementation suffices; see `.ECDSAKey` for one example of an
override.
"""
return [cls.name]
# TODO 4.0: make this and subclasses consistent, some of our own
# classmethods even assume kwargs we don't define!
# TODO 4.0: prob also raise NotImplementedError instead of pass'ing; the
# contract is pretty obviously that you need to handle msg/data/filename
# appropriately. (If 'pass' is a concession to testing, see about doing the
# work to fix the tests instead)
def __init__(self, msg=None, data=None):
"""
Create a new instance of this public key type. If ``msg`` is given,
the key's public part(s) will be filled in from the message. If
``data`` is given, the key's public part(s) will be filled in from
the string.
:param .Message msg:
an optional SSH `.Message` containing a public key of this type.
:param bytes data:
optional, the bytes of a public key of this type
:raises: `.SSHException` --
if a key cannot be created from the ``data`` or ``msg`` given, or
no key was passed in.
"""
pass
# TODO: arguably this might want to be __str__ instead? ehh
# TODO: ditto the interplay between showing class name (currently we just
# say PKey writ large) and algorithm (usually == class name, but not
# always, also sometimes shows certificate-ness)
# TODO: if we do change it, we also want to tweak eg AgentKey, as it
# currently displays agent-ness with a suffix
def __repr__(self):
comment = ""
# Works for AgentKey, may work for others?
if hasattr(self, "comment") and self.comment:
comment = f", comment={self.comment!r}"
return f"PKey(alg={self.algorithm_name}, bits={self.get_bits()}, fp={self.fingerprint}{comment})" # noqa
# TODO 4.0: just merge into __bytes__ (everywhere)
def asbytes(self):
"""
Return a string of an SSH `.Message` made up of the public part(s) of
this key. This string is suitable for passing to `__init__` to
re-create the key object later.
"""
return bytes()
def __bytes__(self):
return self.asbytes()
def __eq__(self, other):
return isinstance(other, PKey) and self._fields == other._fields
def __hash__(self):
return hash(self._fields)
@property
def _fields(self):
raise NotImplementedError
def get_name(self):
"""
Return the name of this private key implementation.
:return:
name of this private key type, in SSH terminology, as a `str` (for
example, ``"ssh-rsa"``).
"""
return ""
@property
def algorithm_name(self):
"""
Return the key algorithm identifier for this key.
Similar to `get_name`, but aimed at pure algorithm name instead of SSH
protocol field value.
"""
# Nuke the leading 'ssh-'
# TODO in Python 3.9: use .removeprefix()
name = self.get_name().replace("ssh-", "")
# Trim any cert suffix (but leave the -cert, as OpenSSH does)
cert_tail = "-cert-v01@openssh.com"
if cert_tail in name:
name = name.replace(cert_tail, "-cert")
# Nuke any eg ECDSA suffix, OpenSSH does basically this too.
else:
name = name.split("-")[0]
return name.upper()
def get_bits(self):
"""
Return the number of significant bits in this key. This is useful
for judging the relative security of a key.
:return: bits in the key (as an `int`)
"""
# TODO 4.0: raise NotImplementedError, 0 is unlikely to ever be
# _correct_ and nothing in the critical path seems to use this.
return 0
def can_sign(self):
"""
Return ``True`` if this key has the private part necessary for signing
data.
"""
return False
def get_fingerprint(self):
"""
Return an MD5 fingerprint of the public part of this key. Nothing
secret is revealed.
:return:
a 16-byte `string <str>` (binary) of the MD5 fingerprint, in SSH
format.
"""
return md5(self.asbytes()).digest()
@property
def fingerprint(self):
"""
Modern fingerprint property designed to be comparable to OpenSSH.
Currently only does SHA256 (the OpenSSH default).
.. versionadded:: 3.2
"""
hashy = sha256(bytes(self))
hash_name = hashy.name.upper()
b64ed = encodebytes(hashy.digest())
cleaned = u(b64ed).strip().rstrip("=") # yes, OpenSSH does this too!
return f"{hash_name}:{cleaned}"
def get_base64(self):
"""
Return a base64 string containing the public part of this key. Nothing
secret is revealed. This format is compatible with that used to store
public key files or recognized host keys.
:return: a base64 `string <str>` containing the public part of the key.
"""
return u(encodebytes(self.asbytes())).replace("\n", "")
def sign_ssh_data(self, data, algorithm=None):
"""
Sign a blob of data with this private key, and return a `.Message`
representing an SSH signature message.
:param bytes data:
the data to sign.
:param str algorithm:
the signature algorithm to use, if different from the key's
internal name. Default: ``None``.
:return: an SSH signature `message <.Message>`.
.. versionchanged:: 2.9
Added the ``algorithm`` kwarg.
"""
return bytes()
def verify_ssh_sig(self, data, msg):
"""
Given a blob of data, and an SSH message representing a signature of
that data, verify that it was signed with this key.
:param bytes data: the data that was signed.
:param .Message msg: an SSH signature message
:return:
``True`` if the signature verifies correctly; ``False`` otherwise.
"""
return False
@classmethod
def from_private_key_file(cls, filename, password=None):
"""
Create a key object by reading a private key file. If the private
key is encrypted and ``password`` is not ``None``, the given password
will be used to decrypt the key (otherwise `.PasswordRequiredException`
is thrown). Through the magic of Python, this factory method will
exist in all subclasses of PKey (such as `.RSAKey`), but
is useless on the abstract PKey class.
:param str filename: name of the file to read
:param str password:
an optional password to use to decrypt the key file, if it's
encrypted
:return: a new `.PKey` based on the given private key
:raises: ``IOError`` -- if there was an error reading the file
:raises: `.PasswordRequiredException` -- if the private key file is
encrypted, and ``password`` is ``None``
:raises: `.SSHException` -- if the key file is invalid
"""
key = cls(filename=filename, password=password)
return key
@classmethod
def from_private_key(cls, file_obj, password=None):
"""
Create a key object by reading a private key from a file (or file-like)
object. If the private key is encrypted and ``password`` is not
``None``, the given password will be used to decrypt the key (otherwise
`.PasswordRequiredException` is thrown).
:param file_obj: the file-like object to read from
:param str password:
an optional password to use to decrypt the key, if it's encrypted
:return: a new `.PKey` based on the given private key
:raises: ``IOError`` -- if there was an error reading the key
:raises: `.PasswordRequiredException` --
if the private key file is encrypted, and ``password`` is ``None``
:raises: `.SSHException` -- if the key file is invalid
"""
key = cls(file_obj=file_obj, password=password)
return key
def write_private_key_file(self, filename, password=None):
"""
Write private key contents into a file. If the password is not
``None``, the key is encrypted before writing.
:param str filename: name of the file to write
:param str password:
an optional password to use to encrypt the key file
:raises: ``IOError`` -- if there was an error writing the file
:raises: `.SSHException` -- if the key is invalid
"""
raise Exception("Not implemented in PKey")
def write_private_key(self, file_obj, password=None):
"""
Write private key contents into a file (or file-like) object. If the
password is not ``None``, the key is encrypted before writing.
:param file_obj: the file-like object to write into
:param str password: an optional password to use to encrypt the key
:raises: ``IOError`` -- if there was an error writing to the file
:raises: `.SSHException` -- if the key is invalid
"""
# TODO 4.0: NotImplementedError (plus everywhere else in here)
raise Exception("Not implemented in PKey")
def _read_private_key_file(self, tag, filename, password=None):
"""
Read an SSH2-format private key file, looking for a string of the type
``"BEGIN xxx PRIVATE KEY"`` for some ``xxx``, base64-decode the text we
find, and return it as a string. If the private key is encrypted and
``password`` is not ``None``, the given password will be used to
decrypt the key (otherwise `.PasswordRequiredException` is thrown).
:param str tag:
``"RSA"`` (or etc), the tag used to mark the data block.
:param str filename:
name of the file to read.
:param str password:
an optional password to use to decrypt the key file, if it's
encrypted.
:return:
the `bytes` that make up the private key.
:raises: ``IOError`` -- if there was an error reading the file.
:raises: `.PasswordRequiredException` -- if the private key file is
encrypted, and ``password`` is ``None``.
:raises: `.SSHException` -- if the key file is invalid.
"""
with open(filename, "r") as f:
data = self._read_private_key(tag, f, password)
return data
def _read_private_key(self, tag, f, password=None):
lines = f.readlines()
if not lines:
raise SSHException("no lines in {} private key file".format(tag))
# find the BEGIN tag
start = 0
m = self.BEGIN_TAG.match(lines[start])
line_range = len(lines) - 1
while start < line_range and not m:
start += 1
m = self.BEGIN_TAG.match(lines[start])
start += 1
keytype = m.group(1) if m else None
if start >= len(lines) or keytype is None:
raise SSHException("not a valid {} private key file".format(tag))
# find the END tag
end = start
m = self.END_TAG.match(lines[end])
while end < line_range and not m:
end += 1
m = self.END_TAG.match(lines[end])
if keytype == tag:
data = self._read_private_key_pem(lines, end, password)
pkformat = self._PRIVATE_KEY_FORMAT_ORIGINAL
elif keytype == "OPENSSH":
data = self._read_private_key_openssh(lines[start:end], password)
pkformat = self._PRIVATE_KEY_FORMAT_OPENSSH
else:
raise SSHException(
"encountered {} key, expected {} key".format(keytype, tag)
)
return pkformat, data
def _got_bad_key_format_id(self, id_):
err = "{}._read_private_key() spat out an unknown key format id '{}'"
raise SSHException(err.format(self.__class__.__name__, id_))
def _read_private_key_pem(self, lines, end, password):
start = 0
# parse any headers first
headers = {}
start += 1
while start < len(lines):
line = lines[start].split(": ")
if len(line) == 1:
break
headers[line[0].lower()] = line[1].strip()
start += 1
# if we trudged to the end of the file, just try to cope.
try:
data = decodebytes(b("".join(lines[start:end])))
except base64.binascii.Error as e:
raise SSHException("base64 decoding error: {}".format(e))
if "proc-type" not in headers:
# unencryped: done
return data
# encrypted keyfile: will need a password
proc_type = headers["proc-type"]
if proc_type != "4,ENCRYPTED":
raise SSHException(
'Unknown private key structure "{}"'.format(proc_type)
)
try:
encryption_type, saltstr = headers["dek-info"].split(",")
except:
raise SSHException("Can't parse DEK-info in private key file")
if encryption_type not in self._CIPHER_TABLE:
raise SSHException(
'Unknown private key cipher "{}"'.format(encryption_type)
)
# if no password was passed in,
# raise an exception pointing out that we need one
if password is None:
raise PasswordRequiredException("Private key file is encrypted")
cipher = self._CIPHER_TABLE[encryption_type]["cipher"]
keysize = self._CIPHER_TABLE[encryption_type]["keysize"]
mode = self._CIPHER_TABLE[encryption_type]["mode"]
salt = unhexlify(b(saltstr))
key = util.generate_key_bytes(md5, salt, password, keysize)
decryptor = Cipher(
cipher(key), mode(salt), backend=default_backend()
).decryptor()
decrypted_data = decryptor.update(data) + decryptor.finalize()
unpadder = padding.PKCS7(cipher.block_size).unpadder()
try:
return unpadder.update(decrypted_data) + unpadder.finalize()
except ValueError:
raise SSHException("Bad password or corrupt private key file")
def _read_private_key_openssh(self, lines, password):
"""
Read the new OpenSSH SSH2 private key format available
since OpenSSH version 6.5
Reference:
https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
"""
try:
data = decodebytes(b("".join(lines)))
except base64.binascii.Error as e:
raise SSHException("base64 decoding error: {}".format(e))
# read data struct
auth_magic = data[:15]
if auth_magic != OPENSSH_AUTH_MAGIC:
raise SSHException("unexpected OpenSSH key header encountered")
cstruct = self._uint32_cstruct_unpack(data[15:], "sssur")
cipher, kdfname, kdf_options, num_pubkeys, remainder = cstruct
# For now, just support 1 key.
if num_pubkeys > 1:
raise SSHException(
"unsupported: private keyfile has multiple keys"
)
pubkey, privkey_blob = self._uint32_cstruct_unpack(remainder, "ss")
if kdfname == b("bcrypt"):
if cipher == b("aes256-cbc"):
mode = modes.CBC
elif cipher == b("aes256-ctr"):
mode = modes.CTR
else:
raise SSHException(
"unknown cipher `{}` used in private key file".format(
cipher.decode("utf-8")
)
)
# Encrypted private key.
# If no password was passed in, raise an exception pointing
# out that we need one
if password is None:
raise PasswordRequiredException(
"private key file is encrypted"
)
# Unpack salt and rounds from kdfoptions
salt, rounds = self._uint32_cstruct_unpack(kdf_options, "su")
# run bcrypt kdf to derive key and iv/nonce (32 + 16 bytes)
key_iv = bcrypt.kdf(
b(password),
b(salt),
48,
rounds,
# We can't control how many rounds are on disk, so no sense
# warning about it.
ignore_few_rounds=True,
)
key = key_iv[:32]
iv = key_iv[32:]
# decrypt private key blob
decryptor = Cipher(
algorithms.AES(key), mode(iv), default_backend()
).decryptor()
decrypted_privkey = decryptor.update(privkey_blob)
decrypted_privkey += decryptor.finalize()
elif cipher == b("none") and kdfname == b("none"):
# Unencrypted private key
decrypted_privkey = privkey_blob
else:
raise SSHException(
"unknown cipher or kdf used in private key file"
)
# Unpack private key and verify checkints
cstruct = self._uint32_cstruct_unpack(decrypted_privkey, "uusr")
checkint1, checkint2, keytype, keydata = cstruct
if checkint1 != checkint2:
raise SSHException(
"OpenSSH private key file checkints do not match"
)
return _unpad_openssh(keydata)
def _uint32_cstruct_unpack(self, data, strformat):
"""
Used to read new OpenSSH private key format.
Unpacks a c data structure containing a mix of 32-bit uints and
variable length strings prefixed by 32-bit uint size field,
according to the specified format. Returns the unpacked vars
in a tuple.
Format strings:
s - denotes a string
i - denotes a long integer, encoded as a byte string
u - denotes a 32-bit unsigned integer
r - the remainder of the input string, returned as a string
"""
arr = []
idx = 0
try:
for f in strformat:
if f == "s":
# string
s_size = struct.unpack(">L", data[idx : idx + 4])[0]
idx += 4
s = data[idx : idx + s_size]
idx += s_size
arr.append(s)
if f == "i":
# long integer
s_size = struct.unpack(">L", data[idx : idx + 4])[0]
idx += 4
s = data[idx : idx + s_size]
idx += s_size
i = util.inflate_long(s, True)
arr.append(i)
elif f == "u":
# 32-bit unsigned int
u = struct.unpack(">L", data[idx : idx + 4])[0]
idx += 4
arr.append(u)
elif f == "r":
# remainder as string
s = data[idx:]
arr.append(s)
break
except Exception as e:
# PKey-consuming code frequently wants to save-and-skip-over issues
# with loading keys, and uses SSHException as the (really friggin
# awful) signal for this. So for now...we do this.
raise SSHException(str(e))
return tuple(arr)
def _write_private_key_file(self, filename, key, format, password=None):
"""
Write an SSH2-format private key file in a form that can be read by
paramiko or openssh. If no password is given, the key is written in
a trivially-encoded format (base64) which is completely insecure. If
a password is given, DES-EDE3-CBC is used.
:param str tag:
``"RSA"`` or etc, the tag used to mark the data block.
:param filename: name of the file to write.
:param bytes data: data blob that makes up the private key.
:param str password: an optional password to use to encrypt the file.
:raises: ``IOError`` -- if there was an error writing the file.
"""
# Ensure that we create new key files directly with a user-only mode,
# instead of opening, writing, then chmodding, which leaves us open to
# CVE-2022-24302.
with os.fdopen(
os.open(
filename,
# NOTE: O_TRUNC is a noop on new files, and O_CREAT is a noop
# on existing files, so using all 3 in both cases is fine.
flags=os.O_WRONLY | os.O_TRUNC | os.O_CREAT,
# Ditto the use of the 'mode' argument; it should be safe to
# give even for existing files (though it will not act like a
# chmod in that case).
mode=o600,
),
# Yea, you still gotta inform the FLO that it is in "write" mode.
"w",
) as f:
self._write_private_key(f, key, format, password=password)
def _write_private_key(self, f, key, format, password=None):
if password is None:
encryption = serialization.NoEncryption()
else:
encryption = serialization.BestAvailableEncryption(b(password))
f.write(
key.private_bytes(
serialization.Encoding.PEM, format, encryption
).decode()
)
def _check_type_and_load_cert(self, msg, key_type, cert_type):
"""
Perform message type-checking & optional certificate loading.
This includes fast-forwarding cert ``msg`` objects past the nonce, so
that the subsequent fields are the key numbers; thus the caller may
expect to treat the message as key material afterwards either way.
The obtained key type is returned for classes which need to know what
it was (e.g. ECDSA.)
"""
# Normalization; most classes have a single key type and give a string,
# but eg ECDSA is a 1:N mapping.
key_types = key_type
cert_types = cert_type
if isinstance(key_type, str):
key_types = [key_types]
if isinstance(cert_types, str):
cert_types = [cert_types]
# Can't do much with no message, that should've been handled elsewhere
if msg is None:
raise SSHException("Key object may not be empty")
# First field is always key type, in either kind of object. (make sure
# we rewind before grabbing it - sometimes caller had to do their own
# introspection first!)
msg.rewind()
type_ = msg.get_text()
# Regular public key - nothing special to do besides the implicit
# type check.
if type_ in key_types:
pass
# OpenSSH-compatible certificate - store full copy as .public_blob
# (so signing works correctly) and then fast-forward past the
# nonce.
elif type_ in cert_types:
# This seems the cleanest way to 'clone' an already-being-read
# message; they're *IO objects at heart and their .getvalue()
# always returns the full value regardless of pointer position.
self.load_certificate(Message(msg.asbytes()))
# Read out nonce as it comes before the public numbers - our caller
# is likely going to use the (only borrowed by us, not owned)
# 'msg' object for loading those numbers right after this.
# TODO: usefully interpret it & other non-public-number fields
# (requires going back into per-type subclasses.)
msg.get_string()
else:
err = "Invalid key (class: {}, data type: {}"
raise SSHException(err.format(self.__class__.__name__, type_))
def load_certificate(self, value):
"""
Supplement the private key contents with data loaded from an OpenSSH
public key (``.pub``) or certificate (``-cert.pub``) file, a string
containing such a file, or a `.Message` object.
The .pub contents adds no real value, since the private key
file includes sufficient information to derive the public
key info. For certificates, however, this can be used on
the client side to offer authentication requests to the server
based on certificate instead of raw public key.
See:
https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.certkeys
Note: very little effort is made to validate the certificate contents,
that is for the server to decide if it is good enough to authenticate
successfully.
"""
if isinstance(value, Message):
constructor = "from_message"
elif os.path.isfile(value):
constructor = "from_file"
else:
constructor = "from_string"
blob = getattr(PublicBlob, constructor)(value)
if not blob.key_type.startswith(self.get_name()):
err = "PublicBlob type {} incompatible with key type {}"
raise ValueError(err.format(blob.key_type, self.get_name()))
self.public_blob = blob
# General construct for an OpenSSH style Public Key blob
# readable from a one-line file of the format:
# <key-name> <base64-blob> [<comment>]
# Of little value in the case of standard public keys
# {ssh-rsa, ssh-ecdsa, ssh-ed25519}, but should
# provide rudimentary support for {*-cert.v01}
| PKey |
python | zostera__django-bootstrap4 | tests/test_formsets.py | {
"start": 218,
"end": 388
} | class ____(TestCase):
def test_illegal_formset(self):
with self.assertRaises(BootstrapError):
render_formset(formset="illegal")
| BootstrapFormSetTest |
python | getsentry__sentry | src/sentry/explore/endpoints/explore_saved_query_starred.py | {
"start": 1045,
"end": 3474
} | class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.EXPLORE
permission_classes = (MemberPermission,)
def has_feature(self, organization, request):
return features.has(
"organizations:visibility-explore-view", organization, actor=request.user
)
def post(self, request: Request, organization: Organization, id: int) -> Response:
"""
Update the starred status of a saved Explore query for the current organization member.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not self.has_feature(organization, request):
return self.respond(status=404)
serializer = StarQuerySerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
is_starred = serializer.validated_data["starred"]
try:
query = ExploreSavedQuery.objects.get(id=id, organization=organization)
except ExploreSavedQuery.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# When unstarring a prebuilt query, we don't delete the starred row from the table.
# This is because prebuilt queries are lazily starred by default for all users when
# fetching saved queries for the first time. We need the starred row to exist to
# prevent the initial lazy-starring from happening again.
if query.prebuilt_id is not None:
if ExploreSavedQueryStarred.objects.updated_starred_query(
organization, request.user.id, query, bool(is_starred)
):
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
if is_starred:
if ExploreSavedQueryStarred.objects.insert_starred_query(
organization, request.user.id, query
):
return Response(status=status.HTTP_200_OK)
else:
if ExploreSavedQueryStarred.objects.delete_starred_query(
organization, request.user.id, query
):
return Response(status=status.HTTP_200_OK)
return Response(status=status.HTTP_204_NO_CONTENT)
| ExploreSavedQueryStarredEndpoint |
python | apache__airflow | dev/breeze/tests/test_ui_commands.py | {
"start": 6378,
"end": 6667
} | class ____:
def test_locale_summary_creation(self):
summary = LocaleSummary(missing_keys={"de": ["key1", "key2"]}, extra_keys={"de": ["key3"]})
assert summary.missing_keys == {"de": ["key1", "key2"]}
assert summary.extra_keys == {"de": ["key3"]}
| TestLocaleSummary |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 186716,
"end": 187640
} | class ____(Request):
"""
Get labels' stats for a dataset version
:param version: Dataset version ID
:type version: str
"""
_service = "datasets"
_action = "get_stats"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"version": {"description": "Dataset version ID", "type": "string"}
},
"required": ["version"],
"type": "object",
}
def __init__(self, version, **kwargs):
super(GetStatsRequest, self).__init__(**kwargs)
self.version = version
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
| GetStatsRequest |
python | langchain-ai__langchain | libs/core/langchain_core/prompt_values.py | {
"start": 2699,
"end": 3057
} | class ____(TypedDict, total=False):
"""Image URL."""
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image.
Can be `'auto'`, `'low'`, or `'high'`.
This follows OpenAI's Chat Completion API's image URL format.
"""
url: str
"""Either a URL of the image or the base64 encoded image data."""
| ImageURL |
python | sympy__sympy | sympy/holonomic/recurrence.py | {
"start": 888,
"end": 2884
} | class ____:
"""
A Recurrence Operator Algebra is a set of noncommutative polynomials
in intermediate `Sn` and coefficients in a base ring A. It follows the
commutation rule:
Sn * a(n) = a(n + 1) * Sn
This class represents a Recurrence Operator Algebra and serves as the parent ring
for Recurrence Operators.
Examples
========
>>> from sympy import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.recurrence import RecurrenceOperators
>>> n = symbols('n', integer=True)
>>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
>>> R
Univariate Recurrence Operator Algebra in intermediate Sn over the base ring
ZZ[n]
See Also
========
RecurrenceOperator
"""
def __init__(self, base, generator):
# the base ring for the algebra
self.base = base
# the operator representing shift i.e. `Sn`
self.shift_operator = RecurrenceOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = symbols('Sn', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = symbols(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Recurrence Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
def _add_lists(list1, list2):
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
| RecurrenceOperatorAlgebra |
python | huggingface__transformers | tests/models/fsmt/test_tokenization_fsmt.py | {
"start": 1029,
"end": 6494
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "stas/tiny-wmt19-en-de"
tokenizer_class = FSMTTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
cls.langs = ["en", "ru"]
config = {
"langs": cls.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
cls.src_vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
cls.tgt_vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
config_file = os.path.join(cls.tmpdirname, "tokenizer_config.json")
cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(cls.src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(cls.tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(cls.merges_file, "w") as fp:
fp.write("\n".join(merges))
with open(config_file, "w") as fp:
fp.write(json.dumps(config))
@cached_property
def tokenizer_ru_en(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")
@cached_property
def tokenizer_en_ru(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
def test_online_tokenizer_config(self):
"""this just tests that the online tokenizer files get correctly fetched and
loaded via its tokenizer_config.json and it's not slow so it's run by normal CI
"""
tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"])
self.assertEqual(tokenizer.src_vocab_size, 21)
self.assertEqual(tokenizer.tgt_vocab_size, 21)
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_ru_en
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [2]
assert encoded_pair == text + [2] + text_2 + [2]
@slow
def test_match_encode_decode(self):
tokenizer_enc = self.tokenizer_en_ru
tokenizer_dec = self.tokenizer_ru_en
targets = [
[
"Here's a little song I wrote. Don't worry, be happy.",
[2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
],
["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
]
# if data needs to be recreated or added, run:
# import torch
# model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
# for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")
for src_text, tgt_input_ids in targets:
encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
self.assertListEqual(encoded_ids, tgt_input_ids)
# and decode backward, using the reversed languages model
decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
self.assertEqual(decoded_text, src_text)
@slow
def test_tokenizer_lower(self):
tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True)
tokens = tokenizer.tokenize("USA is United States of America")
expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
self.assertListEqual(tokens, expected)
@unittest.skip(reason="FSMTConfig.__init__ requires non-optional args")
def test_torch_encode_plus_sent_to_model(self):
pass
@unittest.skip(reason="FSMTConfig.__init__ requires non-optional args")
def test_np_encode_plus_sent_to_model(self):
pass
| FSMTTokenizationTest |
python | pytransitions__transitions | transitions/extensions/factory.py | {
"start": 2508,
"end": 3128
} | class ____(GraphMachine, LockedMachine):
"""
A threadsafe machine with graph support.
"""
@staticmethod
def format_references(func):
if isinstance(func, partial) and func.func.__name__.startswith('_locked_method'):
return "%s(%s)" % (
func.args[0].__name__,
", ".join(itertools.chain(
(str(_) for _ in func.args[1:]),
("%s=%s" % (key, value)
for key, value in iteritems(func.keywords if func.keywords else {})))))
return GraphMachine.format_references(func)
| LockedGraphMachine |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-steamship/llama_index/readers/steamship/base.py | {
"start": 199,
"end": 3450
} | class ____(BaseReader):
"""
Reads persistent Steamship Files and converts them to Documents.
Args:
api_key: Steamship API key. Defaults to STEAMSHIP_API_KEY value if not provided.
Note:
Requires install of `steamship` package and an active Steamship API Key.
To get a Steamship API Key, visit: https://steamship.com/account/api.
Once you have an API Key, expose it via an environment variable named
`STEAMSHIP_API_KEY` or pass it as an init argument (`api_key`).
"""
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize the Reader."""
try:
import steamship # noqa
self.api_key = api_key
except ImportError:
raise ImportError(
"`steamship` must be installed to use the SteamshipFileReader.\n"
"Please run `pip install --upgrade steamship."
)
def load_data(
self,
workspace: str,
query: Optional[str] = None,
file_handles: Optional[List[str]] = None,
collapse_blocks: bool = True,
join_str: str = "\n\n",
) -> List[Document]:
"""
Load data from persistent Steamship Files into Documents.
Args:
workspace: the handle for a Steamship workspace
(see: https://docs.steamship.com/workspaces/index.html)
query: a Steamship tag query for retrieving files
(ex: 'filetag and value("import-id")="import-001"')
file_handles: a list of Steamship File handles
(ex: `smooth-valley-9kbdr`)
collapse_blocks: whether to merge individual File Blocks into a
single Document, or separate them.
join_str: when collapse_blocks is True, this is how the block texts
will be concatenated.
Note:
The collection of Files from both `query` and `file_handles` will be
combined. There is no (current) support for deconflicting the collections
(meaning that if a file appears both in the result set of the query and
as a handle in file_handles, it will be loaded twice).
"""
from steamship import File, Steamship
client = Steamship(workspace=workspace, api_key=self.api_key)
files = []
if query:
files_from_query = File.query(client=client, tag_filter_query=query).files
files.extend(files_from_query)
if file_handles:
files.extend([File.get(client=client, handle=h) for h in file_handles])
docs = []
for file in files:
metadata = {"source": file.handle}
for tag in file.tags:
metadata[tag.kind] = tag.value
if collapse_blocks:
text = join_str.join([b.text for b in file.blocks])
docs.append(Document(text=text, id_=file.handle, metadata=metadata))
else:
docs.extend(
[
Document(text=b.text, id_=file.handle, metadata=metadata)
for b in file.blocks
]
)
return docs
| SteamshipFileReader |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 3931,
"end": 3997
} | class ____(sqltypes.LargeBinary):
__visit_name__ = "BFILE"
| BFILE |
python | matplotlib__matplotlib | lib/matplotlib/image.py | {
"start": 8112,
"end": 32193
} | class ____(mcolorizer.ColorizingArtist):
"""
Base class for images.
*interpolation* and *cmap* default to their rc settings.
*cmap* is a `.colors.Colormap` instance.
*norm* is a `.colors.Normalize` instance to map luminance to 0-1.
*extent* is a ``(left, right, bottom, top)`` tuple in data coordinates, for
making image plots registered with data plots; the default is to label the
pixel centers with the zero-based row and column indices.
Additional kwargs are `.Artist` properties.
"""
zorder = 0
def __init__(self, ax,
cmap=None,
norm=None,
colorizer=None,
interpolation=None,
origin=None,
filternorm=True,
filterrad=4.0,
resample=False,
*,
interpolation_stage=None,
**kwargs
):
super().__init__(self._get_colorizer(cmap, norm, colorizer))
origin = mpl._val_or_rc(origin, 'image.origin')
_api.check_in_list(["upper", "lower"], origin=origin)
self.origin = origin
self.set_filternorm(filternorm)
self.set_filterrad(filterrad)
self.set_interpolation(interpolation)
self.set_interpolation_stage(interpolation_stage)
self.set_resample(resample)
self.axes = ax
self._imcache = None
self._internal_update(kwargs)
def __str__(self):
try:
shape = self.get_shape()
return f"{type(self).__name__}(shape={shape!r})"
except RuntimeError:
return type(self).__name__
def __getstate__(self):
# Save some space on the pickle by not saving the cache.
return {**super().__getstate__(), "_imcache": None}
def get_size(self):
"""Return the size of the image as tuple (numrows, numcols)."""
return self.get_shape()[:2]
def get_shape(self):
"""
Return the shape of the image as tuple (numrows, numcols, channels).
"""
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
Parameters
----------
alpha : float or 2D array-like or None
"""
martist.Artist._set_alpha_for_array(self, alpha)
if np.ndim(alpha) not in (0, 2):
raise TypeError('alpha must be a float, two-dimensional '
'array, or None')
self._imcache = None
def _get_scalar_alpha(self):
"""
Get a scalar alpha value to be applied to the artist as a whole.
If the alpha value is a matrix, the method returns 1.0 because pixels
have individual alpha values (see `~._ImageBase._make_image` for
details). If the alpha value is a scalar, the method returns said value
to be applied to the artist as a whole because pixels do not have
individual alpha values.
"""
return 1.0 if self._alpha is None or np.ndim(self._alpha) > 0 \
else self._alpha
def changed(self):
"""
Call this whenever the mappable is changed so observers can update.
"""
self._imcache = None
super().changed()
def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
unsampled=False, round_to_pixel_border=True):
"""
Normalize, rescale, and colormap the image *A* from the given *in_bbox*
(in data space), to the given *out_bbox* (in pixel space) clipped to
the given *clip_bbox* (also in pixel space), and magnified by the
*magnification* factor.
Parameters
----------
A : ndarray
- a (M, N) array interpreted as scalar (greyscale) image,
with one of the dtypes `~numpy.float32`, `~numpy.float64`,
`~numpy.float128`, `~numpy.uint16` or `~numpy.uint8`.
- (M, N, 4) RGBA image with a dtype of `~numpy.float32`,
`~numpy.float64`, `~numpy.float128`, or `~numpy.uint8`.
in_bbox : `~matplotlib.transforms.Bbox`
out_bbox : `~matplotlib.transforms.Bbox`
clip_bbox : `~matplotlib.transforms.Bbox`
magnification : float, default: 1
unsampled : bool, default: False
If True, the image will not be scaled, but an appropriate
affine transformation will be returned instead.
round_to_pixel_border : bool, default: True
If True, the output image size will be rounded to the nearest pixel
boundary. This makes the images align correctly with the Axes.
It should not be used if exact scaling is needed, such as for
`.FigureImage`.
Returns
-------
image : (M, N, 4) `numpy.uint8` array
The RGBA image, resampled unless *unsampled* is True.
x, y : float
The upper left corner where the image should be drawn, in pixel
space.
trans : `~matplotlib.transforms.Affine2D`
The affine transformation from image to pixel space.
"""
if A is None:
raise RuntimeError('You must first set the image '
'array or the image attribute')
if A.size == 0:
raise RuntimeError("_make_image must get a non-empty image. "
"Your Artist's draw method must filter before "
"this method is called.")
clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)
if clipped_bbox is None:
return None, 0, 0, None
out_width_base = clipped_bbox.width * magnification
out_height_base = clipped_bbox.height * magnification
if out_width_base == 0 or out_height_base == 0:
return None, 0, 0, None
if self.origin == 'upper':
# Flip the input image using a transform. This avoids the
# problem with flipping the array, which results in a copy
# when it is converted to contiguous in the C wrapper
t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
else:
t0 = IdentityTransform()
t0 += (
Affine2D()
.scale(
in_bbox.width / A.shape[1],
in_bbox.height / A.shape[0])
.translate(in_bbox.x0, in_bbox.y0)
+ self.get_transform())
t = (t0
+ (Affine2D()
.translate(-clipped_bbox.x0, -clipped_bbox.y0)
.scale(magnification)))
# So that the image is aligned with the edge of the Axes, we want to
# round up the output width to the next integer. This also means
# scaling the transform slightly to account for the extra subpixel.
if ((not unsampled) and t.is_affine and round_to_pixel_border and
(out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
out_width = math.ceil(out_width_base)
out_height = math.ceil(out_height_base)
extra_width = (out_width - out_width_base) / out_width_base
extra_height = (out_height - out_height_base) / out_height_base
t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
else:
out_width = int(out_width_base)
out_height = int(out_height_base)
out_shape = (out_height, out_width)
if not unsampled:
if not (A.ndim == 2 or A.ndim == 3 and A.shape[-1] in (3, 4)):
raise ValueError(f"Invalid shape {A.shape} for image data")
float_rgba_in = A.ndim == 3 and A.shape[-1] == 4 and A.dtype.kind == 'f'
# if antialiased, this needs to change as window sizes
# change:
interpolation_stage = self._interpolation_stage
if interpolation_stage in ['antialiased', 'auto']:
pos = np.array([[0, 0], [A.shape[1], A.shape[0]]])
disp = t.transform(pos)
dispx = np.abs(np.diff(disp[:, 0])) / A.shape[1]
dispy = np.abs(np.diff(disp[:, 1])) / A.shape[0]
if (dispx < 3) or (dispy < 3):
interpolation_stage = 'rgba'
else:
interpolation_stage = 'data'
if A.ndim == 2 and interpolation_stage == 'data':
# if we are a 2D array, then we are running through the
# norm + colormap transformation. However, in general the
# input data is not going to match the size on the screen so we
# have to resample to the correct number of pixels
if A.dtype.kind == 'f': # Float dtype: scale to same dtype.
scaled_dtype = np.dtype("f8" if A.dtype.itemsize > 4 else "f4")
if scaled_dtype.itemsize < A.dtype.itemsize:
_api.warn_external(f"Casting input data from {A.dtype}"
f" to {scaled_dtype} for imshow.")
else: # Int dtype, likely.
# TODO slice input array first
# Scale to appropriately sized float: use float32 if the
# dynamic range is small, to limit the memory footprint.
da = A.max().astype("f8") - A.min().astype("f8")
scaled_dtype = "f8" if da > 1e8 else "f4"
# resample the input data to the correct resolution and shape
A_resampled = _resample(self, A.astype(scaled_dtype), out_shape, t)
# if using NoNorm, cast back to the original datatype
if isinstance(self.norm, mcolors.NoNorm):
A_resampled = A_resampled.astype(A.dtype)
# Compute out_mask (what screen pixels include "bad" data
# pixels) and out_alpha (to what extent screen pixels are
# covered by data pixels: 0 outside the data extent, 1 inside
# (even for bad data), and intermediate values at the edges).
mask = (np.where(A.mask, np.float32(np.nan), np.float32(1))
if A.mask.shape == A.shape # nontrivial mask
else np.ones_like(A, np.float32))
# we always have to interpolate the mask to account for
# non-affine transformations
out_alpha = _resample(self, mask, out_shape, t, resample=True)
del mask # Make sure we don't use mask anymore!
out_mask = np.isnan(out_alpha)
out_alpha[out_mask] = 1
# Apply the pixel-by-pixel alpha values if present
alpha = self.get_alpha()
if alpha is not None and np.ndim(alpha) > 0:
out_alpha *= _resample(self, alpha, out_shape, t, resample=True)
# mask and run through the norm
resampled_masked = np.ma.masked_array(A_resampled, out_mask)
res = self.norm(resampled_masked)
else:
if A.ndim == 2: # interpolation_stage = 'rgba'
self.norm.autoscale_None(A)
A = self.to_rgba(A)
if A.dtype == np.uint8:
# uint8 is too imprecise for premultiplied alpha roundtrips.
A = np.divide(A, 0xff, dtype=np.float32)
alpha = self.get_alpha()
post_apply_alpha = False
if alpha is None: # alpha parameter not specified
if A.shape[2] == 3: # image has no alpha channel
A = np.dstack([A, np.ones(A.shape[:2])])
elif np.ndim(alpha) > 0: # Array alpha
# user-specified array alpha overrides the existing alpha channel
A = np.dstack([A[..., :3], alpha])
else: # Scalar alpha
if A.shape[2] == 3: # broadcast scalar alpha
A = np.dstack([A, np.full(A.shape[:2], alpha, np.float32)])
else: # or apply scalar alpha to existing alpha channel
post_apply_alpha = True
# Resample in premultiplied alpha space. (TODO: Consider
# implementing premultiplied-space resampling in
# span_image_resample_rgba_affine::generate?)
if float_rgba_in and np.ndim(alpha) == 0 and np.any(A[..., 3] < 1):
# Do not modify original RGBA input
A = A.copy()
A[..., :3] *= A[..., 3:]
res = _resample(self, A, out_shape, t)
np.divide(res[..., :3], res[..., 3:], out=res[..., :3],
where=res[..., 3:] != 0)
if post_apply_alpha:
res[..., 3] *= alpha
# res is now either a 2D array of normed (int or float) data
# or an RGBA array of re-sampled input
output = self.to_rgba(res, bytes=True, norm=False)
# output is now a correctly sized RGBA array of uint8
# Apply alpha *after* if the input was greyscale without a mask
if A.ndim == 2:
alpha = self._get_scalar_alpha()
alpha_channel = output[:, :, 3]
alpha_channel[:] = ( # Assignment will cast to uint8.
alpha_channel.astype(np.float32) * out_alpha * alpha)
else:
if self._imcache is None:
self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
output = self._imcache
# Subset the input image to only the part that will be displayed.
subset = TransformedBbox(clip_bbox, t0.inverted()).frozen()
output = output[
int(max(subset.ymin, 0)):
int(min(subset.ymax + 1, output.shape[0])),
int(max(subset.xmin, 0)):
int(min(subset.xmax + 1, output.shape[1]))]
t = Affine2D().translate(
int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t
return output, clipped_bbox.x0, clipped_bbox.y0, t
def make_image(self, renderer, magnification=1.0, unsampled=False):
    """
    Produce the RGBA data needed to render this image.

    This base implementation only defines the contract and always
    raises; concrete subclasses must override it.

    Parameters
    ----------
    renderer
        The renderer that will consume the image.
    magnification : float, default: 1.0
        Scale factor applied when resampling.
    unsampled : bool, default: False
        If True, the image will not be scaled, but an appropriate
        affine transformation will be returned instead.

    Returns
    -------
    image : (M, N, 4) `numpy.uint8` array
        The RGBA image, resampled unless *unsampled* is True.
    x, y : float
        The upper left corner where the image should be drawn, in pixel
        space.
    trans : `~matplotlib.transforms.Affine2D`
        The affine transformation from image to pixel space.
    """
    raise NotImplementedError('The make_image method must be overridden')
def _check_unsampled_image(self):
"""
Return whether the image is better to be drawn unsampled.
The derived class needs to override it.
"""
return False
@martist.allow_rasterization
def draw(self, renderer):
    """Render this image with *renderer*, honoring visibility and clipping."""
    # Invisible or empty images need no rendering work at all; just
    # clear the stale flag and declare victory.  (Visibility is tested
    # first, matching the order callers may rely on.)
    if not self.get_visible() or self.get_array().size == 0:
        self.stale = False
        return
    # Actually render the image.
    gc = renderer.new_gc()
    self._set_gc_clip(gc)
    gc.set_alpha(self._get_scalar_alpha())
    gc.set_url(self.get_url())
    gc.set_gid(self.get_gid())
    # The unsampled path is only usable when the renderer accepts a
    # transform kwarg and the transform is affine.
    use_unsampled = (renderer.option_scale_image()
                     and self._check_unsampled_image()
                     and self.get_transform().is_affine)
    if use_unsampled:
        im, left, bottom, trans = self.make_image(renderer, unsampled=True)
        if im is not None:
            trans = Affine2D().scale(im.shape[1], im.shape[0]) + trans
            renderer.draw_image(gc, left, bottom, im, trans)
    else:
        im, left, bottom, trans = self.make_image(
            renderer, renderer.get_image_magnification())
        if im is not None:
            renderer.draw_image(gc, left, bottom, im)
    gc.restore()
    self.stale = False
def contains(self, mouseevent):
    """Test whether the mouse event occurred within the image."""
    # Events from another canvas can never hit this artist.
    if self._different_canvas(mouseevent):
        return False, {}
    # This doesn't work for figimage.
    if not self.axes.contains(mouseevent)[0]:
        return False, {}
    # TODO: make sure this is consistent with patch and patch
    # collection on nonlinear transformed coordinates.
    # TODO: consider returning image coordinates (shouldn't
    # be too difficult given that the image is rectilinear
    inv = self.get_transform().inverted()
    px, py = inv.transform([mouseevent.x, mouseevent.y])
    xmin, xmax, ymin, ymax = self.get_extent()
    # (p - lo) * (p - hi) <= 0 holds for lo <= p <= hi *and* hi <= p <= lo,
    # so the extent may be given in either order.
    hit = (px is not None and (px - xmin) * (px - xmax) <= 0
           and py is not None and (py - ymin) * (py - ymax) <= 0)
    return hit, {}
def write_png(self, fname):
    """Write the image to png file *fname*."""
    # Flip vertically for 'lower' origin so the file matches the display.
    data = self._A[::-1] if self.origin == 'lower' else self._A
    rgba = self.to_rgba(data, bytes=True, norm=True)
    PIL.Image.fromarray(rgba).save(fname, format="png")
@staticmethod
def _normalize_image_array(A):
    """
    Check validity of image-like input *A* and normalize it to a format
    suitable for Image subclasses.

    Raises
    ------
    TypeError
        If the dtype cannot be safely converted to float, or the shape
        is not 2D scalar data or (M, N, {3, 4}) RGB(A) data.
    """
    A = cbook.safe_masked_invalid(A, copy=True)
    if A.dtype != np.uint8 and not np.can_cast(A.dtype, float, "same_kind"):
        raise TypeError(
            f"Image data of dtype {A.dtype} cannot be converted to float")
    # A trailing singleton channel is treated as scalar data to be colormapped.
    if A.ndim == 3 and A.shape[-1] == 1:
        A = A.squeeze(-1)
    shape_ok = A.ndim == 2 or (A.ndim == 3 and A.shape[-1] in (3, 4))
    if not shape_ok:
        raise TypeError(f"Invalid shape {A.shape} for image data")
    if A.ndim == 3:
        # RGB(A) values outside the valid range would wrap when cast,
        # hiding outliers and making reliable interpretation impossible;
        # warn and clip instead.
        high = 255 if np.issubdtype(A.dtype, np.integer) else 1
        if A.min() < 0 or high < A.max():
            _log.warning(
                'Clipping input data to the valid range for imshow with '
                'RGB data ([0..1] for floats or [0..255] for integers). '
                'Got range [%s..%s].',
                A.min(), A.max()
            )
            A = np.clip(A, 0, high)
        # Cast unsupported integer types to uint8.
        if A.dtype != np.uint8 and np.issubdtype(A.dtype, np.integer):
            A = A.astype(np.uint8)
    return A
def set_data(self, A):
    """
    Set the image array.

    Note that this function does *not* update the normalization used.

    Parameters
    ----------
    A : array-like or `PIL.Image.Image`
    """
    # Going through pil_to_array applies e.g. any png palette.
    if isinstance(A, PIL.Image.Image):
        A = pil_to_array(A)
    self._A = self._normalize_image_array(A)
    # Invalidate the cached RGBA rendering; it must be rebuilt from the
    # new data on the next draw.
    self._imcache = None
    self.stale = True
def set_array(self, A):
    """
    Retained for backwards compatibility - use set_data instead.

    Parameters
    ----------
    A : array-like
    """
    # Deliberately shadows the inherited cm.ScalarMappable.set_array so
    # that it is not invoked by mistake; simply forward to set_data.
    self.set_data(A)
def get_interpolation(self):
    """
    Return the interpolation method used when this image is resized.

    The result is one of 'auto', 'antialiased', 'nearest', 'bilinear',
    'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
    'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell',
    'sinc', 'lanczos', or 'none'.
    """
    return self._interpolation
def set_interpolation(self, s):
    """
    Set the interpolation method the image uses when resizing.

    If None, use :rc:`image.interpolation`. If 'none', the image is
    shown as is without interpolating. 'none' is only supported in
    agg, ps and pdf backends and will fall back to 'nearest' mode
    for other backends.

    Parameters
    ----------
    s : {'auto', 'nearest', 'bilinear', 'bicubic', 'spline16', \
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', \
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none'} or None
    """
    # Resolve a None input from rcParams, normalize case, then validate.
    name = mpl._val_or_rc(s, 'image.interpolation').lower()
    _api.check_in_list(interpolations_names, interpolation=name)
    self._interpolation = name
    self.stale = True
def get_interpolation_stage(self):
    """
    Return when interpolation happens during the transform to RGBA.

    The result is one of 'data', 'rgba', 'auto'.
    """
    return self._interpolation_stage
def set_interpolation_stage(self, s):
    """
    Set when interpolation happens during the transform to RGBA.

    Parameters
    ----------
    s : {'data', 'rgba', 'auto'}, default: :rc:`image.interpolation_stage`
        Whether to apply resampling interpolation in data or RGBA space.
        If 'auto', 'rgba' is used if the upsampling rate is less than 3,
        otherwise 'data' is used.
    """
    # Resolve a None input from rcParams, then validate the choice.
    stage = mpl._val_or_rc(s, 'image.interpolation_stage')
    _api.check_in_list(['data', 'rgba', 'auto'], s=stage)
    self._interpolation_stage = stage
    self.stale = True
def can_composite(self):
    """Return whether the image can be composited with its neighbors."""
    # Compositing requires an interpolated image drawn under a
    # separable affine transform.
    trans = self.get_transform()
    if self._interpolation == 'none':
        return False
    return trans.is_affine and trans.is_separable
def set_resample(self, v):
    """
    Set whether image resampling is used.

    Parameters
    ----------
    v : bool, default: :rc:`image.resample`
    """
    # A None input falls back to the rcParams default.
    self._resample = mpl._val_or_rc(v, 'image.resample')
    self.stale = True
def get_resample(self):
    """Return whether image resampling is in use."""
    return self._resample
def set_filternorm(self, filternorm):
    """
    Set whether the resize filter normalizes the weights.

    See help for `~.Axes.imshow`.

    Parameters
    ----------
    filternorm : bool
    """
    # Coerce to a plain bool so truthy inputs are stored canonically.
    self._filternorm = bool(filternorm)
    self.stale = True
def get_filternorm(self):
    """Return whether the resize filter normalizes its weights."""
    return self._filternorm
def set_filterrad(self, filterrad):
    """
    Set the resize filter radius (only applicable to some
    interpolation schemes).

    See help for `~.Axes.imshow`.

    Parameters
    ----------
    filterrad : positive float

    Raises
    ------
    ValueError
        If *filterrad* is not a positive number; zero, negative and NaN
        values are all rejected.
    """
    r = float(filterrad)
    # Use ``not r > 0`` rather than ``r <= 0``: both comparisons are
    # False for NaN, so the latter would silently store an invalid
    # (NaN) radius instead of raising.
    if not r > 0:
        raise ValueError("The filter radius must be a positive number")
    self._filterrad = r
    self.stale = True
def get_filterrad(self):
    """Return the current filter radius setting."""
    return self._filterrad
| _ImageBase |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 21162,
"end": 22015
} | class ____(ProjectUsersMixin, GenericView):
success_message = _("User deleted")
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
username = self.request.POST.get("username")
user = get_object_or_404(
self.get_queryset(),
username=username,
)
if self._is_last_user():
# NOTE: don't include user input in the message, since it's a security risk.
return HttpResponseBadRequest(_("User is the last owner, can't be removed"))
project = self.get_project()
project.users.remove(user)
messages.success(self.request, self.success_message)
if user == request.user:
return HttpResponseRedirect(reverse("projects_dashboard"))
return HttpResponseRedirect(self.get_success_url())
| ProjectUsersDelete |
python | mahmoud__boltons | boltons/tbutils.py | {
"start": 8783,
"end": 13334
} | class ____:
"""The TracebackInfo class provides a basic representation of a stack
trace, be it from an exception being handled or just part of
normal execution. It is basically a wrapper around a list of
:class:`Callpoint` objects representing frames.
Args:
frames (list): A list of frame objects in the stack.
.. note ::
``TracebackInfo`` can represent both exception tracebacks and
non-exception tracebacks (aka stack traces). As a result, there
is no ``TracebackInfo.from_current()``, as that would be
ambiguous. Instead, call :meth:`TracebackInfo.from_frame`
without the *frame* argument for a stack trace, or
:meth:`TracebackInfo.from_traceback` without the *tb* argument
for an exception traceback.
"""
callpoint_type = Callpoint
def __init__(self, frames):
self.frames = frames
@classmethod
def from_frame(cls, frame=None, level=1, limit=None):
"""Create a new TracebackInfo *frame* by recurring up in the stack a
max of *limit* times. If *frame* is unset, get the frame from
:func:`sys._getframe` using *level*.
Args:
frame (types.FrameType): frame object from
:func:`sys._getframe` or elsewhere. Defaults to result
of :func:`sys.get_frame`.
level (int): If *frame* is unset, the desired frame is
this many levels up the stack from the invocation of
this method. Default ``1`` (i.e., caller of this method).
limit (int): max number of parent frames to extract
(defaults to :data:`sys.tracebacklimit`)
"""
ret = []
if frame is None:
frame = sys._getframe(level)
if limit is None:
limit = getattr(sys, 'tracebacklimit', 1000)
n = 0
while frame is not None and n < limit:
item = cls.callpoint_type.from_frame(frame)
ret.append(item)
frame = frame.f_back
n += 1
ret.reverse()
return cls(ret)
@classmethod
def from_traceback(cls, tb=None, limit=None):
"""Create a new TracebackInfo from the traceback *tb* by recurring
up in the stack a max of *limit* times. If *tb* is unset, get
the traceback from the currently handled exception. If no
exception is being handled, raise a :exc:`ValueError`.
Args:
frame (types.TracebackType): traceback object from
:func:`sys.exc_info` or elsewhere. If absent or set to
``None``, defaults to ``sys.exc_info()[2]``, and
raises a :exc:`ValueError` if no exception is
currently being handled.
limit (int): max number of parent frames to extract
(defaults to :data:`sys.tracebacklimit`)
"""
ret = []
if tb is None:
tb = sys.exc_info()[2]
if tb is None:
raise ValueError('no tb set and no exception being handled')
if limit is None:
limit = getattr(sys, 'tracebacklimit', 1000)
n = 0
while tb is not None and n < limit:
item = cls.callpoint_type.from_tb(tb)
ret.append(item)
tb = tb.tb_next
n += 1
return cls(ret)
@classmethod
def from_dict(cls, d):
"Complements :meth:`TracebackInfo.to_dict`."
# TODO: check this.
return cls(d['frames'])
def to_dict(self):
"""Returns a dict with a list of :class:`Callpoint` frames converted
to dicts.
"""
return {'frames': [f.to_dict() for f in self.frames]}
def __len__(self):
return len(self.frames)
def __iter__(self):
return iter(self.frames)
def __repr__(self):
cn = self.__class__.__name__
if self.frames:
frame_part = f' last={self.frames[-1]!r}'
else:
frame_part = ''
return f'<{cn} frames={len(self.frames)}{frame_part}>'
def __str__(self):
return self.get_formatted()
def get_formatted(self):
"""Returns a string as formatted in the traditional Python
built-in style observable when an exception is not caught. In
other words, mimics :func:`traceback.format_tb` and
:func:`traceback.format_stack`.
"""
ret = 'Traceback (most recent call last):\n'
ret += ''.join([f.tb_frame_str() for f in self.frames])
return ret
| TracebackInfo |
python | ansible__ansible | test/integration/targets/ansible-test-integration-targets/test.py | {
"start": 1556,
"end": 2132
} | class ____(unittest.TestCase):
def test_prefixes(self):
try:
command = ['ansible-test', 'integration', '--list-targets']
something = subprocess.run([*command, 'something/'], text=True, capture_output=True, check=True)
self.assertEqual(something.stdout.splitlines(), ['one-part_test', 'two_part_test'])
except subprocess.CalledProcessError as ex:
raise Exception(f'{ex}:\n>>> Standard Output:\n{ex.stdout}\n>>> Standard Error:\n{ex.stderr}') from ex
if __name__ == '__main__':
unittest.main()
| PrefixesTest |
python | sympy__sympy | sympy/functions/elementary/hyperbolic.py | {
"start": 53220,
"end": 57851
} | class ____(InverseHyperbolicFunction):
"""
``acoth(x)`` is the inverse hyperbolic cotangent of ``x``.
The inverse hyperbolic cotangent function.
Examples
========
>>> from sympy import acoth
>>> from sympy.abc import x
>>> acoth(x).diff(x)
1/(1 - x**2)
See Also
========
sympy.functions.elementary.hyperbolic.asinh
sympy.functions.elementary.hyperbolic.acosh
sympy.functions.elementary.hyperbolic.coth
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity or arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return pi*I / 2
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.Zero
i_coeff = _imaginary_unit_as_coefficient(arg)
if i_coeff is not None:
return -I * acot(i_coeff)
else:
if arg.could_extract_minus_sign():
return -cls(-arg)
if arg.is_zero:
return pi*I*S.Half
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return -I*pi/2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
if x0 is S.ComplexInfinity:
return (1/arg).as_leading_term(x)
if x0 is S.NaN:
expr = self.func(arg.as_leading_term(x))
if expr.is_finite:
return expr
else:
return self
# Handling branch points
if x0 in (-S.One, S.One, S.Zero):
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
# Handling points lying on branch cuts [-1, 1]
if x0.is_real and (1 - x0**2).is_positive:
ndir = arg.dir(x, cdir if cdir else 1)
if im(ndir).is_negative:
if x0.is_positive:
return self.func(x0) + I*pi
elif im(ndir).is_positive:
if x0.is_negative:
return self.func(x0) - I*pi
else:
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
return self.func(x0)
def _eval_nseries(self, x, n, logx, cdir=0): # acoth
arg = self.args[0]
arg0 = arg.subs(x, 0)
# Handling branch points
if arg0 in (S.One, S.NegativeOne):
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
res = super()._eval_nseries(x, n=n, logx=logx)
if arg0 is S.ComplexInfinity:
return res
# Handling points lying on branch cuts [-1, 1]
if arg0.is_real and (1 - arg0**2).is_positive:
ndir = arg.dir(x, cdir if cdir else 1)
if im(ndir).is_negative:
if arg0.is_positive:
return res + I*pi
elif im(ndir).is_positive:
if arg0.is_negative:
return res - I*pi
else:
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
return res
def _eval_rewrite_as_log(self, x, **kwargs):
return (log(1 + 1/x) - log(1 - 1/x)) / 2
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _eval_rewrite_as_atanh(self, x, **kwargs):
return atanh(1/x)
def _eval_rewrite_as_asinh(self, x, **kwargs):
return (pi*I/2*(sqrt((x - 1)/x)*sqrt(x/(x - 1)) - sqrt(1 + 1/x)*sqrt(x/(x + 1))) +
x*sqrt(1/x**2)*asinh(sqrt(1/(x**2 - 1))))
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return coth
def _eval_is_extended_real(self):
return fuzzy_and([self.args[0].is_extended_real, fuzzy_or([(self.args[0] - 1).is_extended_nonnegative, (self.args[0] + 1).is_extended_nonpositive])])
def _eval_is_finite(self):
return fuzzy_not(fuzzy_or([(self.args[0] - 1).is_zero, (self.args[0] + 1).is_zero]))
| acoth |
python | python-attrs__attrs | typing-examples/baseline.py | {
"start": 607,
"end": 652
} | class ____:
a: int
D(1).a
@attrs.define
| D |
python | jina-ai__jina | jina/serve/runtimes/servers/composite.py | {
"start": 191,
"end": 3437
} | class ____(BaseServer):
"""Composite Base Server implementation from which u can inherit a specific custom composite one"""
servers: List['BaseServer']
logger: 'JinaLogger'
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._kwargs = kwargs
@property
def _server_kwargs(self):
ret = []
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
for port, protocol in zip(self.ports, self.protocols):
# ignore monitoring and tracing args since they are not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = port
runtime_args.protocol = protocol
server_kwargs = {
k: v for k, v in self._kwargs.items() if k != 'runtime_args'
}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
ret.append(server_kwargs)
return ret
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
async def setup_server(self):
"""
setup servers inside CompositeServer
"""
self.logger.debug(f'Setting up Composite server')
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
self.logger.debug(f'Composite server setup successful')
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run servers inside CompositeServer forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
| CompositeBaseServer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 12917,
"end": 13135
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "DagsterTypeNotFoundError"
dagster_type_name = graphene.NonNull(graphene.String)
| GrapheneDagsterTypeNotFoundError |
python | davidhalter__jedi | jedi/inference/value/function.py | {
"start": 14427,
"end": 15016
} | class ____(BaseFunctionExecutionContext):
def infer_annotations(self):
# I don't think inferring anonymous executions is a big thing.
# Anonymous contexts are mostly there for the user to work in. ~ dave
return NO_VALUES
def get_filters(self, until_position=None, origin_scope=None):
yield AnonymousFunctionExecutionFilter(
self, self._value,
until_position=until_position,
origin_scope=origin_scope,
)
def get_param_names(self):
return self._value.get_param_names()
| AnonymousFunctionExecution |
python | plotly__plotly.py | plotly/graph_objs/parcoords/_line.py | {
"start": 233,
"end": 20585
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords"
_path_str = "parcoords.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"reversescale",
"showscale",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. Has an effect only if in `line.color` is set
to a numerical array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `line.color`) or the bounds
set in `line.cmin` and `line.cmax` Has an effect only if in
`line.color` is set to a numerical array. Defaults to `false`
when `line.cmin` and `line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `line.color` is set to a numerical array. Value should have
the same units as in `line.color` and if set, `line.cmin` must
be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `line.cmin`
and/or `line.cmax` to be equidistant to this point. Has an
effect only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color`. Has no
effect when `line.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `line.color` is set to a numerical array. Value should have
the same units as in `line.color` and if set, `line.cmax` must
be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the line color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to `line.cmin`
and `line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to parcoords.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.parcoords.line.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `line.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`line.cmin` and `line.cmax`. Alternatively, `colorscale` may be
a palette name string of the following list: Blackbody,Bluered,
Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portla
nd,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`line.color` is set to a numerical array. If true, `line.cmin`
will correspond to the last color in the array and `line.cmax`
will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `line.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. Has an effect only if in
`line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `line.color`)
or the bounds set in `line.cmin` and `line.cmax` Has an
effect only if in `line.color` is set to a numerical
array. Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`line.cmin` and/or `line.cmax` to be equidistant to
this point. Has an effect only if in `line.color` is
set to a numerical array. Value should have the same
units as in `line.color`. Has no effect when
`line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmax` must be set as well.
color
Sets the line color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `line.cmin` and `line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.parcoords.line.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `line.cmin` and `line.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `line.color` is set to a numerical array. If
true, `line.cmin` will correspond to the last color in
the array and `line.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `line.color` is
set to a numerical array.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
reversescale=None,
showscale=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. Has an effect only if in
`line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `line.color`)
or the bounds set in `line.cmin` and `line.cmax` Has an
effect only if in `line.color` is set to a numerical
array. Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`line.cmin` and/or `line.cmax` to be equidistant to
this point. Has an effect only if in `line.color` is
set to a numerical array. Value should have the same
units as in `line.color`. Has no effect when
`line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmax` must be set as well.
color
Sets the line color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `line.cmin` and `line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.parcoords.line.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `line.cmin` and `line.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `line.color` is set to a numerical array. If
true, `line.cmin` will correspond to the last color in
the array and `line.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `line.color` is
set to a numerical array.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | spack__spack | lib/spack/spack/platforms/linux.py | {
"start": 232,
"end": 563
} | class ____(Platform):
priority = 90
def __init__(self):
super().__init__("linux")
linux_dist = LinuxDistro()
self.default_os = str(linux_dist)
self.add_operating_system(str(linux_dist), linux_dist)
@classmethod
def detect(cls):
return "linux" in platform.system().lower()
| Linux |
python | ray-project__ray | python/ray/data/_internal/stats.py | {
"start": 43246,
"end": 53498
} | class ____:
operators_stats: List["OperatorStatsSummary"]
iter_stats: "IterStatsSummary"
parents: List["DatasetStatsSummary"]
number: int
dataset_uuid: str
time_total_s: float
base_name: str
extra_metrics: Dict[str, Any]
global_bytes_spilled: int
global_bytes_restored: int
dataset_bytes_spilled: int
streaming_exec_schedule_s: float
def to_string(
self,
already_printed: Optional[Set[str]] = None,
include_parent: bool = True,
add_global_stats=True,
) -> str:
"""Return a human-readable summary of this Dataset's stats.
Args:
already_printed: Set of operator IDs that have already had its stats printed
out.
include_parent: If true, also include parent stats summary; otherwise, only
log stats of the latest operator.
add_global_stats: If true, includes global stats to this summary.
Returns:
String with summary statistics for executing the Dataset.
"""
if already_printed is None:
already_printed = set()
out = ""
if self.parents and include_parent:
for p in self.parents:
parent_sum = p.to_string(already_printed, add_global_stats=False)
if parent_sum:
out += parent_sum
out += "\n"
operators_stats_summary = None
if len(self.operators_stats) == 1:
operators_stats_summary = self.operators_stats[0]
operator_name = operators_stats_summary.operator_name
operator_uuid = self.dataset_uuid + operator_name
out += "Operator {} {}: ".format(self.number, operator_name)
if operator_uuid in already_printed:
out += "[execution cached]\n"
else:
already_printed.add(operator_uuid)
out += str(operators_stats_summary)
elif len(self.operators_stats) > 1:
rounded_total = round(self.time_total_s, 2)
if rounded_total <= 0:
# Handle -0.0 case.
rounded_total = 0
out += "Operator {} {}: executed in {}s\n".format(
self.number, self.base_name, rounded_total
)
for n, operators_stats_summary in enumerate(self.operators_stats):
operator_name = operators_stats_summary.operator_name
operator_uuid = self.dataset_uuid + operator_name
out += "\n"
out += "\tSuboperator {} {}: ".format(n, operator_name)
if operator_uuid in already_printed:
out += "\t[execution cached]\n"
else:
already_printed.add(operator_uuid)
out += str(operators_stats_summary)
verbose_stats_logs = DataContext.get_current().verbose_stats_logs
if verbose_stats_logs and self.extra_metrics:
indent = (
"\t"
if operators_stats_summary and operators_stats_summary.is_sub_operator
else ""
)
out += indent
out += "* Extra metrics: " + str(self.extra_metrics) + "\n"
out += str(self.iter_stats)
if len(self.operators_stats) > 0 and add_global_stats:
mb_spilled = round(self.global_bytes_spilled / 1e6)
mb_restored = round(self.global_bytes_restored / 1e6)
if mb_spilled or mb_restored:
out += "\nCluster memory:\n"
out += "* Spilled to disk: {}MB\n".format(mb_spilled)
out += "* Restored from disk: {}MB\n".format(mb_restored)
dataset_mb_spilled = round(self.dataset_bytes_spilled / 1e6)
if dataset_mb_spilled:
out += "\nDataset memory:\n"
out += "* Spilled to disk: {}MB\n".format(dataset_mb_spilled)
if self.num_rows_per_s:
out += "\n"
out += "Dataset throughput:\n"
out += f"\t* Ray Data throughput: {self.num_rows_per_s} rows/s\n"
if verbose_stats_logs and add_global_stats:
out += "\n" + self.runtime_metrics()
return out
@property
def num_rows_per_s(self) -> float:
"""Calculates the throughput in rows per second for the entire dataset."""
# The observed dataset throughput is computed by dividing the total number
# of rows produced by the total wall time of the dataset (i.e. from start to
# finish how long did the dataset take to be processed). With the recursive
# nature of the DatasetStatsSummary, we use get_total_wall_time to determine
# the total wall time (this finds the difference between the earliest start
# and latest end for any block in any operator).
output_num_rows = (
self.operators_stats[-1].output_num_rows if self.operators_stats else 0
)
total_num_out_rows = output_num_rows["sum"] if output_num_rows else 0
wall_time = self.get_total_wall_time()
if not total_num_out_rows or not wall_time:
return 0.0
return total_num_out_rows / wall_time
@staticmethod
def _collect_dataset_stats_summaries(
curr: "DatasetStatsSummary",
) -> List["DatasetStatsSummary"]:
summs = []
# TODO: Do operators ever have multiple parents? Do we need to deduplicate?
for p in curr.parents:
if p and p.parents:
summs.extend(DatasetStatsSummary._collect_dataset_stats_summaries(p))
return summs + [curr]
@staticmethod
def _find_start_and_end(summ: "DatasetStatsSummary") -> Tuple[float, float]:
earliest_start = min(ops.earliest_start_time for ops in summ.operators_stats)
latest_end = max(ops.latest_end_time for ops in summ.operators_stats)
return earliest_start, latest_end
def runtime_metrics(self) -> str:
total_wall_time = self.get_total_wall_time()
def fmt_line(name: str, time: float) -> str:
fraction = time / total_wall_time if total_wall_time > 0 else 0
return f"* {name}: {fmt(time)} ({fraction * 100:.3f}%)\n"
summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self)
out = "Runtime Metrics:\n"
for summ in summaries:
if len(summ.operators_stats) > 0:
earliest_start, latest_end = DatasetStatsSummary._find_start_and_end(
summ
)
op_total_time = latest_end - earliest_start
out += fmt_line(summ.base_name, op_total_time)
out += fmt_line("Scheduling", self.streaming_exec_schedule_s)
out += fmt_line("Total", total_wall_time)
return out
def __repr__(self, level=0) -> str:
indent = leveled_indent(level)
operators_stats = "\n".join(
[ss.__repr__(level + 2) for ss in self.operators_stats]
)
parent_stats = "\n".join([ps.__repr__(level + 2) for ps in self.parents])
extra_metrics = "\n".join(
f"{leveled_indent(level + 2)}{k}: {v},"
for k, v in self.extra_metrics.items()
)
# Handle formatting case for empty outputs.
operators_stats = (
f"\n{operators_stats},\n{indent} " if operators_stats else ""
)
parent_stats = f"\n{parent_stats},\n{indent} " if parent_stats else ""
extra_metrics = f"\n{extra_metrics}\n{indent} " if extra_metrics else ""
return (
f"{indent}DatasetStatsSummary(\n"
f"{indent} dataset_uuid={self.dataset_uuid},\n"
f"{indent} base_name={self.base_name},\n"
f"{indent} number={self.number},\n"
f"{indent} extra_metrics={{{extra_metrics}}},\n"
f"{indent} operators_stats=[{operators_stats}],\n"
f"{indent} iter_stats={self.iter_stats.__repr__(level+1)},\n"
f"{indent} global_bytes_spilled={self.global_bytes_spilled / 1e6}MB,\n"
f"{indent} global_bytes_restored={self.global_bytes_restored / 1e6}MB,\n"
f"{indent} dataset_bytes_spilled={self.dataset_bytes_spilled / 1e6}MB,\n"
f"{indent} parents=[{parent_stats}],\n"
f"{indent})"
)
def get_total_wall_time(self) -> float:
"""Calculate the total wall time for the dataset, this is done by finding
the earliest start time and latest end time for any block in any operator.
The wall time is the difference of these two times.
"""
start_ends = [
DatasetStatsSummary._find_start_and_end(summ)
for summ in DatasetStatsSummary._collect_dataset_stats_summaries(self)
if len(summ.operators_stats) > 0
]
if len(start_ends) == 0:
return 0
else:
earliest_start = min(start_end[0] for start_end in start_ends)
latest_end = max(start_end[1] for start_end in start_ends)
return latest_end - earliest_start
def get_total_time_all_blocks(self) -> float:
"""Calculate the sum of the wall times across all blocks of all operators."""
summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self)
return sum(
(
sum(
ops.wall_time.get("sum", 0) if ops.wall_time else 0
for ops in summ.operators_stats
)
)
for summ in summaries
)
def get_total_cpu_time(self) -> float:
parent_sum = sum(p.get_total_cpu_time() for p in self.parents)
return parent_sum + sum(
ss.cpu_time.get("sum", 0) for ss in self.operators_stats
)
def get_max_heap_memory(self) -> float:
parent_memory = [p.get_max_heap_memory() for p in self.parents]
parent_max = max(parent_memory) if parent_memory else 0
if not self.operators_stats:
return parent_max
return max(
parent_max,
*[ss.memory.get("max", 0) for ss in self.operators_stats],
)
@dataclass
| DatasetStatsSummary |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 6710,
"end": 7138
} | class ____(TestCase):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(not iscomplexobj(z))
z = np.array([-1j, 0, -1])
assert_(iscomplexobj(z))
def test_scalar(self):
assert_(not iscomplexobj(1.0))
assert_(iscomplexobj(1 + 0j))
def test_list(self):
assert_(iscomplexobj([3, 1 + 0j, True]))
assert_(not iscomplexobj([3, 1, True]))
| TestIscomplexobj |
python | pypa__pip | tests/unit/test_exceptions.py | {
"start": 2296,
"end": 8320
} | class ____:
def test_complete(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context="Something went wrong\nvery wrong.",
note_stmt="You did something wrong, which is what caused this error.",
hint_stmt="Do it better next time, by trying harder.",
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
Something went wrong
very wrong.
note: You did something wrong, which is what caused this error.
hint: Do it better next time, by trying harder.
"""
)
def test_complete_color(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke.",
context="Something went wrong\nvery wrong.",
note_stmt="You did something wrong.",
hint_stmt="Do it better next time, by trying harder.",
)
def esc(code: str = "0") -> str:
return f"\x1b[{code}m"
assert rendered_in_ascii(err, color=True) == textwrap.dedent(
f"""\
{esc("1;31")}error{esc("0")}: {esc("1")}test-diagnostic{esc("0")}
Oh no!
It broke.
Something went wrong
very wrong.
{esc("1;35")}note{esc("0")}: You did something wrong.
{esc("1;36")}hint{esc("0")}: Do it better next time, by trying harder.
"""
)
def test_no_context(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context=None,
note_stmt="You did something wrong, which is what caused this error.",
hint_stmt="Do it better next time, by trying harder.",
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
note: You did something wrong, which is what caused this error.
hint: Do it better next time, by trying harder.
"""
)
def test_no_note(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context="Something went wrong\nvery wrong.",
note_stmt=None,
hint_stmt="Do it better next time, by trying harder.",
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
Something went wrong
very wrong.
hint: Do it better next time, by trying harder.
"""
)
def test_no_hint(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context="Something went wrong\nvery wrong.",
note_stmt="You did something wrong, which is what caused this error.",
hint_stmt=None,
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
Something went wrong
very wrong.
note: You did something wrong, which is what caused this error.
"""
)
def test_no_context_no_hint(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context=None,
note_stmt="You did something wrong, which is what caused this error.",
hint_stmt=None,
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
note: You did something wrong, which is what caused this error.
"""
)
def test_no_context_no_note(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context=None,
note_stmt=None,
hint_stmt="Do it better next time, by trying harder.",
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
hint: Do it better next time, by trying harder.
"""
)
def test_no_hint_no_note(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context="Something went wrong\nvery wrong.",
note_stmt=None,
hint_stmt=None,
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
Something went wrong
very wrong.
"""
)
def test_no_hint_no_note_no_context(self) -> None:
err = DiagnosticPipError(
reference="test-diagnostic",
message="Oh no!\nIt broke. :(",
context=None,
hint_stmt=None,
note_stmt=None,
)
assert rendered_in_ascii(err) == textwrap.dedent(
"""\
error: test-diagnostic
Oh no!
It broke. :(
"""
)
def rendered(error: DiagnosticPipError, *, color: bool = False) -> str:
with io.StringIO() as stream:
console = rich.console.Console(
force_terminal=False,
file=stream,
color_system="truecolor" if color else None,
)
console.print(error)
return stream.getvalue()
| TestDiagnosticPipErrorPresentation_ASCII |
python | getsentry__sentry | src/sentry/models/featureadoption.py | {
"start": 8968,
"end": 9774
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization = FlexibleForeignKey("sentry.Organization")
feature_id = models.PositiveIntegerField(choices=[(f.id, str(f.name)) for f in manager.all()])
date_completed = models.DateTimeField(default=timezone.now)
complete = models.BooleanField(default=False)
applicable = models.BooleanField(default=True) # Is this feature applicable to this team?
data = LegacyTextJSONField(default=dict)
objects: ClassVar[FeatureAdoptionManager] = FeatureAdoptionManager()
__repr__ = sane_repr("organization_id", "feature_id", "complete", "applicable")
class Meta:
app_label = "sentry"
db_table = "sentry_featureadoption"
unique_together = (("organization", "feature_id"),)
| FeatureAdoption |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.