language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/transfers/test_hive_to_samba.py | {
"start": 1579,
"end": 4346
} | class ____(TestHiveEnvironment):
def setup_method(self, method):
self.kwargs = dict(
hql="hql",
destination_filepath="destination_filepath",
samba_conn_id="samba_default",
hiveserver2_conn_id="hiveserver2_default",
task_id="test_hive_to_samba_operator",
)
super().setup_method(method)
@patch("airflow.providers.apache.hive.transfers.hive_to_samba.SambaHook")
@patch("airflow.providers.apache.hive.transfers.hive_to_samba.HiveServer2Hook")
@patch("airflow.providers.apache.hive.transfers.hive_to_samba.NamedTemporaryFile")
def test_execute(self, mock_tmp_file, mock_hive_hook, mock_samba_hook):
type(mock_tmp_file).name = PropertyMock(return_value="tmp_file")
mock_tmp_file.return_value.__enter__ = Mock(return_value=mock_tmp_file)
context = {}
HiveToSambaOperator(**self.kwargs).execute(context)
mock_hive_hook.assert_called_once_with(hiveserver2_conn_id=self.kwargs["hiveserver2_conn_id"])
mock_hive_hook.return_value.to_csv.assert_called_once_with(
self.kwargs["hql"],
csv_filepath=mock_tmp_file.name,
hive_conf=context_to_airflow_vars(context),
)
mock_samba_hook.assert_called_once_with(samba_conn_id=self.kwargs["samba_conn_id"])
mock_samba_hook.return_value.push_from_local.assert_called_once_with(
self.kwargs["destination_filepath"], mock_tmp_file.name
)
@pytest.mark.skipif(
"AIRFLOW_RUNALL_TESTS" not in os.environ, reason="Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
@patch("tempfile.tempdir", "/tmp/")
@patch("tempfile._RandomNameSequence.__next__")
@patch(
"airflow.providers.apache.hive.transfers.hive_to_samba.HiveServer2Hook",
side_effect=MockHiveServer2Hook,
)
def test_hive2samba(self, mock_hive_server_hook, mock_temp_dir):
mock_temp_dir.return_value = "tst"
samba_hook = MockSambaHook(self.kwargs["samba_conn_id"])
samba_hook.upload = MagicMock()
with patch(
"airflow.providers.apache.hive.transfers.hive_to_samba.SambaHook", return_value=samba_hook
):
samba_hook.conn.upload = MagicMock()
op = HiveToSambaOperator(
task_id="hive2samba_check",
samba_conn_id="tableau_samba",
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath="test_airflow.csv",
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
samba_hook.conn.upload.assert_called_with("/tmp/tmptst", "test_airflow.csv")
| TestHive2SambaOperator |
python | cython__cython | Cython/Compiler/Symtab.py | {
"start": 99863,
"end": 101779
} | class ____(Scope):
# Namespace of a C struct or union.
def __init__(self, name="?"):
Scope.__init__(self, name, outer_scope=None, parent_scope=None)
def declare_var(self, name, type, pos,
cname=None, visibility='private',
api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None,
allow_pyobject=False, allow_memoryview=False, allow_refcounted=False):
# Add an entry for an attribute.
if not cname:
cname = name
if visibility == 'private':
cname = c_safe_identifier(cname)
if type.is_cfunction:
type = PyrexTypes.CPtrType(type)
self._reject_pytyping_modifiers(pos, pytyping_modifiers)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject:
if not allow_pyobject:
error(pos, "C struct/union member cannot be a Python object")
elif type.is_memoryviewslice:
if not allow_memoryview:
# Memory views wrap their buffer owner as a Python object.
error(pos, "C struct/union member cannot be a memory view")
elif type.needs_refcounting:
if not allow_refcounted:
error(pos, "C struct/union member cannot be reference-counted type '%s'" % type)
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), overridable=False): # currently no utility code ...
if overridable:
error(pos, "C struct/union member cannot be declared 'cpdef'")
return self.declare_var(name, type, pos,
cname=cname, visibility=visibility)
| StructOrUnionScope |
python | walkccc__LeetCode | solutions/1196. How Many Apples Can You Put into the Basket/1196.py | {
"start": 0,
"end": 209
} | class ____:
def maxNumberOfApples(self, weight: list[int]) -> int:
summ = 0
for i, w in enumerate(sorted(weight)):
summ += w
if summ > 5000:
return i
return len(weight)
| Solution |
python | bokeh__bokeh | src/bokeh/models/text.py | {
"start": 2814,
"end": 4073
} | class ____(MathText):
""" Render mathematical content using `LaTeX <https://www.latex-project.org/>`_
notation.
See :ref:`ug_styling_mathtext` in the |user guide| for more information.
.. note::
Bokeh uses `MathJax <https://www.mathjax.org>`_ to render text
containing mathematical notation.
MathJax only supports math-mode macros (no text-mode macros). You
can see more about differences between standard TeX/LaTeX and MathJax
here: https://docs.mathjax.org/en/latest/input/tex/differences.html
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
macros = Dict(String, Either(String, Tuple(String, Int)), help="""
User defined TeX macros.
This is a mapping from control sequence names (without leading backslash) to
either replacement strings or tuples of a replacement string and a number
of arguments.
Example:
.. code-block:: python
TeX(text=r"\\R \\rightarrow \\R^2", macros={"RR": r"{\\bf R}"})
""")
inline = Bool(default=False, help="""
Whether the math text is inline display or not (for TeX input). Default is False.
""")
| TeX |
python | getsentry__sentry | src/social_auth/backends/visualstudio.py | {
"start": 610,
"end": 1066
} | class ____(OAuthBackend):
"""Visual Studio OAuth authentication backend"""
name = "visualstudio"
EXTRA_DATA = [("id", "id"), ("refresh_token", "refresh_token")]
def get_user_details(self, response):
"""Return user details from Visual Studio account"""
return {
"email": response.get("email"),
"id": response.get("id"),
"full_name": response.get("full_name"),
}
| VisualStudioBackend |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 855204,
"end": 858337
} | class ____(sgqlc.types.Type, Node, Closable, Updatable):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"body",
"body_html",
"columns",
"created_at",
"creator",
"database_id",
"name",
"number",
"owner",
"pending_cards",
"progress",
"resource_path",
"state",
"updated_at",
"url",
)
body = sgqlc.types.Field(String, graphql_name="body")
body_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bodyHTML")
columns = sgqlc.types.Field(
sgqlc.types.non_null(ProjectColumnConnection),
graphql_name="columns",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
creator = sgqlc.types.Field(Actor, graphql_name="creator")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
owner = sgqlc.types.Field(sgqlc.types.non_null(ProjectOwner), graphql_name="owner")
pending_cards = sgqlc.types.Field(
sgqlc.types.non_null(ProjectCardConnection),
graphql_name="pendingCards",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"archived_states",
sgqlc.types.Arg(
sgqlc.types.list_of(ProjectCardArchivedState),
graphql_name="archivedStates",
default=("ARCHIVED", "NOT_ARCHIVED"),
),
),
)
),
)
progress = sgqlc.types.Field(
sgqlc.types.non_null(ProjectProgress), graphql_name="progress"
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
state = sgqlc.types.Field(sgqlc.types.non_null(ProjectState), graphql_name="state")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
| Project |
python | getsentry__sentry | tests/sentry/seer/autofix/test_autofix.py | {
"start": 40838,
"end": 44329
} | class ____(TestCase):
@patch("sentry.seer.autofix.autofix._get_github_username_for_user")
@patch("sentry.seer.autofix.autofix.requests.post")
@patch("sentry.seer.autofix.autofix.sign_with_seer_secret")
def test_call_autofix(self, mock_sign, mock_post, mock_get_username) -> None:
"""Tests the _call_autofix function makes the correct API call."""
# Setup mocks
mock_sign.return_value = {"Authorization": "Bearer test"}
mock_post.return_value.json.return_value = {"run_id": "test-run-id"}
mock_get_username.return_value = None # No GitHub username
# Mock objects
user = Mock()
user.id = 123
user.get_display_name.return_value = "Test User"
group = Mock()
group.organization.id = 456
group.project.id = 789
group.id = 101112
group.title = "Test Group"
group.qualified_short_id = "TEST-123"
now = datetime.now()
group.first_seen = now
# Test data
repos = [{"name": "test-repo"}]
serialized_event = {"event_id": "test-event"}
profile = {"profile_data": "test"}
trace_tree = {"trace_data": "test"}
logs = {"logs": [{"message": "test-log"}]}
tags_overview = {"tags": [{"key": "environment", "top_values": []}]}
instruction = "Test instruction"
# Call the function with keyword arguments
run_id = _call_autofix(
user=user,
group=group,
repos=repos,
serialized_event=serialized_event,
profile=profile,
trace_tree=trace_tree,
logs=logs,
tags_overview=tags_overview,
instruction=instruction,
timeout_secs=TIMEOUT_SECONDS,
pr_to_comment_on_url="https://github.com/getsentry/sentry/pull/123",
)
# Verify the result
assert run_id == "test-run-id"
# Verify the API call
mock_post.assert_called_once()
url = mock_post.call_args[0][0]
assert "/v1/automation/autofix/start" in url
# Verify the request body
body = orjson.loads(mock_post.call_args[1]["data"])
assert body["organization_id"] == 456
assert body["project_id"] == 789
assert body["repos"] == repos
assert body["issue"]["id"] == 101112
assert body["issue"]["title"] == "Test Group"
assert body["issue"]["short_id"] == "TEST-123"
assert body["issue"]["first_seen"] == now.isoformat()
assert body["issue"]["events"] == [serialized_event]
assert body["profile"] == profile
assert body["trace_tree"] == trace_tree
assert body["logs"] == logs
assert body["tags_overview"] == tags_overview
assert body["instruction"] == "Test instruction"
assert body["timeout_secs"] == TIMEOUT_SECONDS
assert body["invoking_user"]["id"] == 123
assert body["invoking_user"]["display_name"] == "Test User"
assert body["invoking_user"]["github_username"] is None
assert (
body["options"]["comment_on_pr_with_url"]
== "https://github.com/getsentry/sentry/pull/123"
)
assert body["options"]["disable_coding_step"] is False
# Verify headers
headers = mock_post.call_args[1]["headers"]
assert headers["content-type"] == "application/json;charset=utf-8"
assert headers["Authorization"] == "Bearer test"
| TestCallAutofix |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 72996,
"end": 77020
} | class ____(BaseTest):
def test_from_triple(self):
f = llvm.Target.from_triple
with self.assertRaises(RuntimeError) as cm:
f("foobar")
self.assertIn("No available targets are compatible with",
str(cm.exception))
triple = llvm.get_default_triple()
target = f(triple)
self.assertEqual(target.triple, triple)
target.close()
def test_create_target_machine(self):
target = llvm.Target.from_triple(llvm.get_default_triple())
# With the default settings
target.create_target_machine('', '', 1, 'default', 'default')
# With the host's CPU
cpu = llvm.get_host_cpu_name()
target.create_target_machine(cpu, '', 1, 'default', 'default')
def test_name(self):
t = llvm.Target.from_triple(llvm.get_default_triple())
u = llvm.Target.from_default_triple()
self.assertIsInstance(t.name, str)
self.assertEqual(t.name, u.name)
def test_description(self):
t = llvm.Target.from_triple(llvm.get_default_triple())
u = llvm.Target.from_default_triple()
self.assertIsInstance(t.description, str)
self.assertEqual(t.description, u.description)
def test_str(self):
target = llvm.Target.from_triple(llvm.get_default_triple())
s = str(target)
self.assertIn(target.name, s)
self.assertIn(target.description, s)
def test_get_parts_from_triple(self):
# Tests adapted from llvm-14::llvm/unittests/ADT/TripleTest.cpp
cases = [
("x86_64-scei-ps4",
llvm.targets.Triple(Arch="x86_64", SubArch='',
Vendor="scei", OS="ps4",
Env="unknown", ObjectFormat="ELF")),
("x86_64-sie-ps4",
llvm.targets.Triple(Arch="x86_64", SubArch='',
Vendor="scei", OS="ps4",
Env="unknown", ObjectFormat="ELF")),
("powerpc-dunno-notsure",
llvm.targets.Triple(Arch="powerpc", SubArch='',
Vendor="unknown", OS="unknown",
Env="unknown", ObjectFormat="ELF")),
("powerpcspe-unknown-freebsd",
llvm.targets.Triple(Arch="powerpc", SubArch='spe',
Vendor="unknown", OS="freebsd",
Env="unknown", ObjectFormat="ELF")),
("armv6hl-none-linux-gnueabi",
llvm.targets.Triple(Arch="arm", SubArch='v6hl',
Vendor="unknown", OS="linux",
Env="gnueabi", ObjectFormat="ELF")),
("i686-unknown-linux-gnu",
llvm.targets.Triple(Arch="i386", SubArch='',
Vendor="unknown", OS="linux",
Env="gnu", ObjectFormat="ELF")),
("i686-apple-macosx",
llvm.targets.Triple(Arch="i386", SubArch='',
Vendor="apple", OS="macosx",
Env="unknown", ObjectFormat="MachO")),
("i686-dunno-win32",
llvm.targets.Triple(Arch="i386", SubArch='',
Vendor="unknown", OS="windows",
Env="msvc", ObjectFormat="COFF")),
("s390x-ibm-zos",
llvm.targets.Triple(Arch="s390x", SubArch='',
Vendor="ibm", OS="zos",
Env="unknown", ObjectFormat="GOFF")),
("wasm64-wasi",
llvm.targets.Triple(Arch="wasm64", SubArch='',
Vendor="unknown", OS="wasi",
Env="unknown", ObjectFormat="Wasm")),
]
for case in cases:
triple_str, triple_obj = case
res = llvm.get_triple_parts(triple_str)
self.assertEqual(res, triple_obj)
| TestTarget |
python | pennersr__django-allauth | allauth/socialaccount/providers/tumblr/views.py | {
"start": 186,
"end": 373
} | class ____(OAuth):
url = "https://api.tumblr.com/v2/user/info"
def get_user_info(self):
data = self.query(self.url).json()
return data["response"]["user"]
| TumblrAPI |
python | matplotlib__matplotlib | lib/matplotlib/offsetbox.py | {
"start": 36920,
"end": 38188
} | class ____(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text.
"""
def __init__(self, s, loc, *, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
Parameters
----------
s : str
Text.
loc : str
Location code. See `AnchoredOffsetbox`.
pad : float, default: 0.4
Padding around the text as fraction of the fontsize.
borderpad : float, default: 0.5
Spacing between the offsetbox frame and the *bbox_to_anchor*.
prop : dict, optional
Dictionary of keyword parameters to be passed to the
`~matplotlib.text.Text` instance contained inside AnchoredText.
**kwargs
All other parameters are passed to `AnchoredOffsetbox`.
"""
if prop is None:
prop = {}
badkwargs = {'va', 'verticalalignment'}
if badkwargs & set(prop):
raise ValueError(
'Mixing verticalalignment with AnchoredText is not supported.')
self.txt = TextArea(s, textprops=prop)
fp = self.txt._text.get_fontproperties()
super().__init__(
loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp,
**kwargs)
| AnchoredText |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_query.py | {
"start": 10477,
"end": 11794
} | class ____(fixtures.TestBase):
__only_on__ = "mysql >= 5.7", "mariadb"
__backend__ = True
@combinations(
(True),
(False),
(None),
("unset"),
argnames="nullable",
)
def test_column_computed_for_nullable(self, connection, nullable):
"""test #10056
we want to make sure that nullable is always set to True for computed
column as it is not supported for mariaDB
ref: https://mariadb.com/kb/en/generated-columns/#statement-support
"""
m = MetaData()
kwargs = {"nullable": nullable} if nullable != "unset" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2"), **kwargs),
)
if connection.engine.dialect.name == "mariadb" and nullable in (
False,
None,
):
assert_raises(
exc.ProgrammingError,
connection.execute,
schema.CreateTable(t),
)
# If assertion happens table won't be created so
# return from test
return
# Create and then drop table
connection.execute(schema.CreateTable(t))
connection.execute(schema.DropTable(t))
| ComputedTest |
python | sphinx-doc__sphinx | sphinx/search/pt.py | {
"start": 199,
"end": 617
} | class ____(SearchLanguage):
lang = 'pt'
language_name = 'Portuguese'
js_stemmer_rawcode = 'portuguese-stemmer.js'
stopwords = PORTUGUESE_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('portuguese')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
| SearchPortuguese |
python | fastai__fastai | fastai/callback/azureml.py | {
"start": 317,
"end": 2694
} | class ____(Callback):
"""
Log losses, metrics, model architecture summary to AzureML.
If `log_offline` is False, will only log if actually running on AzureML.
A custom AzureML `Run` class can be passed as `azurerun`.
If `log_to_parent` is True, will also log to the parent run, if exists (e.g. in AzureML pipelines).
"""
order = Recorder.order+1
def __init__(self, azurerun=None, log_to_parent=True):
if azurerun:
self.azurerun = azurerun
else:
try:
self.azurerun = Run.get_context(allow_offline=False)
except RunEnvironmentException:
# running locally
self.azurerun = None
warnings.warn("Not running on AzureML and no azurerun passed, AzureMLCallback will be disabled.")
self.log_to_parent = log_to_parent
def before_fit(self):
self._log("n_epoch", self.learn.n_epoch)
self._log("model_class", str(type(self.learn.model)))
try:
summary_file = Path("outputs") / 'model_summary.txt'
with summary_file.open("w") as f:
f.write(repr(self.learn.model))
except:
print('Did not log model summary. Check if your model is PyTorch model.')
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self._log('batch__loss', self.learn.loss.item())
self._log('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items():
self._log(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']:
self._log(f'epoch__{n}', v)
if n == 'time':
# split elapsed time string, then convert into 'seconds' to log
m, s = str(v).split(':')
elapsed = int(m)*60 + int(s)
self._log(f'epoch__{n}', elapsed)
def _log(self, metric, value):
if self.azurerun is not None:
self.azurerun.log(metric, value)
if self.log_to_parent and self.azurerun.parent is not None:
self.azurerun.parent.log(metric, value) | AzureMLCallback |
python | readthedocs__readthedocs.org | readthedocs/projects/admin.py | {
"start": 12984,
"end": 13313
} | class ____(admin.ModelAdmin):
"""Admin view for :py:class:`ImportedFile`."""
raw_id_fields = ("project", "version")
list_display = ("path", "version", "build")
list_select_related = ("project", "version", "version__project")
search_fields = ("project__slug", "version__slug", "path", "build")
| ImportedFileAdmin |
python | walkccc__LeetCode | solutions/359. Logger Rate Limiter/359-2.py | {
"start": 0,
"end": 276
} | class ____:
def __init__(self):
self.okTime = {} # {message: ok time}
def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
if timestamp < self.okTime.get(message, 0):
return False
self.okTime[message] = timestamp + 10
return True
| Logger |
python | sympy__sympy | sympy/polys/rings.py | {
"start": 6540,
"end": 24916
} | class ____(DefaultPrinting, IPolys[Er], Generic[Er]):
"""Multivariate distributed polynomial ring."""
symbols: tuple[Expr, ...]
gens: tuple[PolyElement[Er], ...]
ngens: int
_gens_set: set[PolyElement]
domain: Domain[Er]
order: MonomialOrder
_hash: int
_hash_tuple: tuple
_one: list[tuple[Mon, Er]]
dtype: Callable[[Iterable[tuple[Mon, Er]] | dict[Mon, Er]], PolyElement[Er]]
monomial_mul: Callable[[Mon, Mon], Mon]
monomial_pow: Callable[[Mon, int], Mon]
monomial_mulpow: Callable[[Mon, Mon, int], Mon]
monomial_ldiv: Callable[[Mon, Mon], Mon]
monomial_div: Callable[[Mon, Mon], Mon]
monomial_lcm: Callable[[Mon, Mon], Mon]
monomial_gcd: Callable[[Mon, Mon], Mon]
leading_expv: Callable[[PolyElement[Er]], Mon]
zero_monom: Mon
@overload
def __new__(
cls,
symbols: str | Expr | Sequence[str] | Sequence[Expr],
domain: Domain[Er],
order: str | MonomialOrder | None = lex,
) -> PolyRing[Er]: ...
@overload
def __new__(
cls,
symbols: str | Expr | Sequence[str] | Sequence[Expr],
domain: PolyRing[Es],
order: str | MonomialOrder | None = lex,
) -> PolyRing[PolyElement[Es]]: ...
def __new__( # type: ignore
cls,
symbols: str | Expr | Sequence[str] | Sequence[Expr],
domain: Domain[Er] | PolyRing[Es],
order: str | MonomialOrder | None = lex,
) -> PolyRing[Er] | PolyRing[PolyElement[Es]]:
# Create a new ring instance.
symbols = tuple(_parse_symbols(symbols))
ngens = len(symbols)
domain = DomainOpt.preprocess(domain)
morder = OrderOpt.preprocess(order)
# Validate that symbols do not overlap with domain symbols
if isinstance(domain, CompositeDomain) and set(symbols) & set(domain.symbols):
raise GeneratorsError(
"polynomial ring and it's ground domain share generators"
)
# Create and initialize instance
obj = object.__new__(cls)
obj._hash_tuple = (cls.__name__, symbols, ngens, domain, order)
obj._hash = hash(obj._hash_tuple)
obj.symbols = symbols
obj.ngens = ngens
obj.domain = domain
obj.order = morder
# Set up polynomial creation and basic elements
obj.dtype = PolyElement(obj, ()).new
obj.zero_monom = (0,) * ngens
obj.gens = obj._gens()
obj._gens_set = set(obj.gens)
obj._one = [(obj.zero_monom, domain.one)]
# Initialize monomial operations
obj._init_monomial_operations()
# Set up leading exponent function
obj._init_leading_expv_function(order)
# Add generator attributes for Symbol names
obj._add_generator_attributes()
return obj
def _init_monomial_operations(self) -> None:
# Initialize monomial operations based on number of generators.
if self.ngens:
# Operations for rings with at least one variable
codegen = MonomialOps(self.ngens)
self.monomial_mul = codegen.mul()
self.monomial_pow = codegen.pow()
self.monomial_mulpow = codegen.mulpow()
self.monomial_ldiv = codegen.ldiv()
self.monomial_div = codegen.div()
self.monomial_lcm = codegen.lcm()
self.monomial_gcd = codegen.gcd()
else:
# No variables, all operations return empty tuple
monunit = lambda a, b: ()
self.monomial_mul = monunit
self.monomial_pow = monunit
self.monomial_mulpow = lambda a, b, c: ()
self.monomial_ldiv = monunit
self.monomial_div = monunit
self.monomial_lcm = monunit
self.monomial_gcd = monunit
def _init_leading_expv_function(self, order) -> None:
# Initialize the leading exponent vector function.
if order is lex:
self.leading_expv = max
else:
self.leading_expv = lambda f: max(f, key=order)
def _add_generator_attributes(self) -> None:
"""Add generator attributes for Symbol names."""
for symbol, generator in zip(self.symbols, self.gens):
if isinstance(symbol, Symbol):
name = symbol.name
if not hasattr(self, name):
setattr(self, name, generator)
# Pickle support
def __getnewargs__(self) -> tuple[tuple[Expr, ...], Domain[Er], MonomialOrder]:
return self.symbols, self.domain, self.order
# Hash and equality
def __hash__(self) -> int:
return self._hash
def __eq__(self, other):
return isinstance(other, PolyRing) and self._ring_equality(other)
def __ne__(self, other):
return not self == other
@overload
def __getitem__(self, key: int) -> PolyRing[Er]: ...
@overload
def __getitem__(self, key: slice) -> PolyRing[Er] | Domain[Er]: ...
def __getitem__(self, key: slice | int) -> PolyRing[Er] | Domain[Er]:
# Get a subring with subset of symbols.
symbols = self.symbols[key]
if not symbols:
return self.domain
else:
return self.clone(symbols=symbols)
# Properties
@property
def zero(self) -> PolyElement[Er]:
"""The zero polynomial."""
return self.dtype([])
@property
def one(self) -> PolyElement[Er]:
"""The unit polynomial."""
return self.dtype(self._one)
@property
def is_univariate(self) -> bool:
"""True if this is a univariate ring."""
return self.ngens == 1
@property
def is_multivariate(self) -> bool:
"""True if this is a multivariate ring."""
return self.ngens > 1
# Ring operations and cloning
@overload
def clone(
self,
symbols: Expr | list[Expr] | tuple[Expr, ...] | None = None,
domain: None = None,
order: None = None,
) -> PolyRing[Er]: ...
@overload
def clone(
self,
symbols: Expr | list[Expr] | tuple[Expr, ...] | None = None,
*,
domain: Domain[Es],
order: None = None,
) -> PolyRing[Es]: ...
@overload
def clone(
self,
symbols: Expr | list[Expr] | tuple[Expr, ...] | None = None,
*,
domain: PolyRing[Es],
order: None = None,
) -> PolyRing[PolyElement[Es]]: ...
def clone(
self,
symbols: Expr | list[Expr] | tuple[Expr, ...] | None = None,
domain: PolyRing[Es] | Domain[Es] | None = None,
order: str | MonomialOrder | None = None,
) -> PolyRing[Er] | PolyRing[Es] | PolyRing[PolyElement[Es]]:
"""Create a clone with modified parameters."""
# Convert list to tuple for hashability
if symbols is not None and isinstance(symbols, list):
symbols = tuple(symbols)
return self._clone(symbols, domain, order)
@cacheit
def _clone(
self,
symbols: Expr | tuple[Expr, ...] | None,
domain: PolyRing[Es] | Domain[Et] | None,
order: str | MonomialOrder | None,
) -> PolyRing[Er] | PolyRing[Et] | PolyRing[PolyElement[Es]]:
return PolyRing(
symbols or self.symbols, domain or self.domain, order or self.order
)
def compose(self, other: PolyRing[Er]) -> PolyRing[Er]:
"""Add the generators of other ring to this ring."""
if self != other:
syms = set(self.symbols).union(set(other.symbols))
return self.clone(symbols=list(syms))
else:
return self
# Domain conversions
def to_domain(self) -> PolynomialRing[Er]:
"""Convert to a domain."""
return PolynomialRing(self)
def to_field(self) -> FracField[Er]:
"""Convert to a field of fractions."""
from sympy.polys.fields import FracField
return FracField(self.symbols, self.domain, self.order)
def to_ground(self: PolyRing[PolyElement[Es]]) -> PolyRing[Es]:
"""Convert to ground domain."""
if isinstance(self.domain, CompositeDomain) or hasattr(self.domain, "domain"):
return self.clone(domain=self.domain.domain)
else:
raise ValueError(f"{self.domain} is not a composite domain")
# Element creation and testing
def is_element(self, element) -> TypeIs[PolyElement[Er]]:
"""Check if element belongs to this ring."""
return isinstance(element, PolyElement) and element.ring == self
def domain_new(self, element, orig_domain=None) -> Er:
"""Create a new element of the ground domain."""
return self.domain.convert(element, orig_domain)
def ground_new(self, coeff) -> PolyElement[Er]:
"""Create a constant polynomial with given coefficient."""
return self.term_new(self.zero_monom, coeff)
def term_new(self, monom: Mon, coeff: int | Er) -> PolyElement[Er]:
"""Create a polynomial with a single term."""
coeff = self.domain_new(coeff)
poly = self.zero
if coeff:
poly[monom] = coeff
return poly
# Polynomial creation from various formats
def from_dict(
self,
element: Mapping[Mon, int | Er | Expr] | PolyElement[Er],
orig_domain: Domain[Er] | None = None,
) -> PolyElement[Er]:
"""Create polynomial from dictionary of monomials to coefficients."""
if not isinstance(element, dict):
raise TypeError(
"Input must be a dictionary mapping monomials to coefficients"
)
return self._from_dict_ground(element, orig_domain)
def from_terms(
self, element: Iterable[tuple[Mon, Er]], orig_domain: Domain[Er] | None = None
) -> PolyElement[Er]:
"""Create polynomial from sequence of (monomial, coefficient) pairs."""
return self.from_dict(dict(element), orig_domain)
def from_list(self, element: dmp[Er]) -> PolyElement[Er]:
"""Create polynomial from list(dense) representation."""
poly_dict = dmp_to_dict(element, self.ngens - 1, self.domain)
return self.from_dict(poly_dict)
def from_expr(self, expr) -> PolyElement[Er]:
"""Create polynomial from SymPy expression."""
mapping = dict(zip(self.symbols, self.gens))
try:
poly = self._rebuild_expr(expr, mapping)
except CoercionFailed:
raise ValueError(
f"expected an expression convertible to a polynomial in {self}, "
f"got {expr}"
)
else:
return self.ring_new(poly)
def _rebuild_expr(self, expr, mapping) -> PolyElement[Er]:
# Rebuild expression as polynomial.
domain = self.domain
def _rebuild(expr):
generator = mapping.get(expr)
if generator is not None:
return generator
elif expr.is_Add:
return reduce(add, map(_rebuild, expr.args))
elif expr.is_Mul:
return reduce(mul, map(_rebuild, expr.args))
else:
# Handle powers and other expressions
base, exp = expr.as_base_exp()
if exp.is_Integer and exp > 1:
return _rebuild(base) ** int(exp)
else:
return self.ground_new(domain.convert(expr))
return _rebuild(sympify(expr))
# Generator operations
def monomial_basis(self, i) -> tuple[int, ...]:
"""Return the i-th basis element."""
basis = [0] * self.ngens
basis[i] = 1
return tuple(basis)
def index(self, gen: PolyElement[Er] | int | str | None) -> int:
"""Get index of generator in the ring."""
if gen is None:
return 0 if self.ngens else -1 # Impossible choice indicator
elif isinstance(gen, (int, str)):
return self._gen_index(gen)
elif self.is_element(gen):
try:
return self.gens.index(gen)
except ValueError:
raise ValueError(f"invalid generator: {gen}")
else:
raise ValueError(
f"expected a polynomial generator, an integer, a string or None, "
f"got {gen}"
)
def _gen_index(self, gen: int | str) -> int:
# Get generator index from int or string.
if isinstance(gen, int):
if 0 <= gen < self.ngens:
return gen
else:
raise ValueError(f"invalid generator index: {gen}")
else:
try:
return self.symbols.index(gen)
except ValueError:
raise ValueError(f"invalid generator: {gen}")
def add_gens(self, symbols: Iterable[Symbol]) -> PolyRing[Er]:
"""Add new generators to the ring."""
syms = set(self.symbols).union(set(symbols))
return self.clone(symbols=list(syms))
def drop(self, *gens: PolyElement[Er] | int | str) -> PolyRing[Er] | Domain[Er]:
"""Remove specified generators from the ring."""
indices = set(map(self.index, gens))
symbols = [s for i, s in enumerate(self.symbols) if i not in indices]
if not symbols:
return self.domain
else:
return self.clone(symbols=symbols)
def drop_to_ground(
self, *gens: PolyElement[Er] | int | str | None
) -> PolyRing[PolyElement[Er]] | PolyRing[Er]:
"""Remove generators and inject them into the ground domain."""
indices = set(map(self.index, gens))
symbols = [s for i, s in enumerate(self.symbols) if i not in indices]
gens_to_drop = [gen for i, gen in enumerate(self.gens) if i not in indices]
if not symbols:
return self
else:
return self.clone(symbols=symbols, domain=self.drop(*gens_to_drop))
# Polynomial operations
def add(self, *objs):
"""
Add a sequence of polynomials or containers of polynomials.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> R, x = ring("x", ZZ)
>>> R.add([ x**2 + 2*i + 3 for i in range(4) ])
4*x**2 + 24
>>> _.factor_list()
(4, [(x**2 + 6, 1)])
"""
result = self.zero
for obj in objs:
if is_sequence(obj, include=GeneratorType):
result += self.add(*obj)
else:
result += obj
return result
def mul(self, *objs):
    """
    Multiply a sequence of polynomials or containers of polynomials.

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.mul([ x**2 + 2*i + 3 for i in range(4) ])
    x**8 + 24*x**6 + 206*x**4 + 744*x**2 + 945
    >>> _.factor_list()
    (1, [(x**2 + 3, 1), (x**2 + 5, 1), (x**2 + 7, 1), (x**2 + 9, 1)])

    """
    product = self.one
    for obj in objs:
        # Containers (including generators) are flattened recursively;
        # anything else is multiplied in directly.
        if is_sequence(obj, include=GeneratorType):
            product *= self.mul(*obj)
        else:
            product *= obj
    return product
def symmetric_poly(self, n: int) -> "PolyElement[Er]":
    """Return the elementary symmetric polynomial of degree n."""
    if n < 0 or n > self.ngens:
        raise ValueError(
            f"Cannot generate symmetric polynomial of order {n} for {self.gens}"
        )
    if not n:
        # e_0 is the constant 1.
        return self.one
    # e_n is the sum, over all n-element subsets of the generators, of the
    # product of the chosen generators: each term's exponent vector is 0/1.
    result = self.zero
    for chosen in subsets(range(self.ngens), int(n)):
        exponents = tuple(int(k in chosen) for k in range(self.ngens))
        result += self.term_new(exponents, self.domain.one)
    return result
# Main element creation method
def ring_new(self, element) -> PolyElement[Er]:
    """Create a ring element from various input types.

    Dispatches on the type of ``element``, in order:

    * ``PolyElement`` -- returned as-is when it belongs to this ring, or
      wrapped as a ground coefficient when it belongs to the ring underlying
      a ``PolynomialRing`` ground domain; any other ring raises
      ``NotImplementedError``.
    * ``str`` -- parsing is not implemented; raises ``NotImplementedError``.
    * ``dict`` -- treated as a monomial -> coefficient mapping.
    * ``list`` -- tried first as ``(monomial, coefficient)`` term pairs,
      falling back to a dense coefficient list on ``ValueError``.
    * ``Expr`` -- converted from a SymPy expression.
    * anything else -- treated as a ground-domain coefficient.
    """
    if isinstance(element, PolyElement):
        if self == element.ring:
            return element
        elif (
            isinstance(self.domain, PolynomialRing)
            and self.domain.ring == element.ring
        ):
            # Element lives in the ground domain's ring: lift it to a
            # constant coefficient of this ring.
            return self.ground_new(element)
        else:
            raise NotImplementedError("conversion")
    elif isinstance(element, str):
        raise NotImplementedError("parsing")
    elif isinstance(element, dict):
        return self.from_dict(element)
    elif isinstance(element, list):
        try:
            # First interpretation: list of (monomial, coefficient) terms.
            return self.from_terms(element)
        except ValueError:
            # Fallback: dense list of coefficients.
            return self.from_list(element)
    elif isinstance(element, Expr):
        return self.from_expr(element)
    else:
        return self.ground_new(element)

# Calling the ring itself, e.g. ``R(expr)``, is an alias for ring_new.
__call__ = ring_new
# Serialization support
def __getstate__(self):
    """Return picklable state, omitting derived/cached attributes.

    ``leading_expv`` and the ``monomial_*`` callables are caches that are
    rebuilt after unpickling, so they are stripped from the state dict.

    Bug fix: the previous version deleted ``monomial_*`` keys while
    iterating ``state`` directly, which raises
    ``RuntimeError: dictionary changed size during iteration`` as soon as
    one such key exists. Iterate over a snapshot of the keys instead.
    """
    state = self.__dict__.copy()
    # pop() tolerates the attribute being absent, unlike a bare del.
    state.pop("leading_expv", None)
    for key in list(state):
        if key.startswith("monomial_"):
            del state[key]
    return state
# Internal helper methods
def _gens(self) -> "tuple[PolyElement[Er], ...]":
    # Build the generator polynomials: the i-th generator is the polynomial
    # with a single term, coefficient 1 on the i-th basis monomial.
    unit = self.domain.one
    generator_polys = []
    for pos in range(self.ngens):
        basis_monom = self.monomial_basis(pos)
        gen_poly = self.zero
        gen_poly[basis_monom] = unit
        generator_polys.append(gen_poly)
    return tuple(generator_polys)
def _ring_equality(self, other: "PolyRing[Er]") -> bool:
    # Two rings are equal iff all of their defining data agree.
    if self.symbols != other.symbols:
        return False
    if self.domain != other.domain:
        return False
    return self.ngens == other.ngens and self.order == other.order
def _from_dict_ground(
    self, element: "Mapping[Mon, int | Er | Expr]", orig_domain=None
) -> "PolyElement[Er]":
    # Build a polynomial from a monomial -> coefficient mapping, converting
    # each coefficient into this ring's ground domain.
    result = self.zero
    convert = self.domain_new
    for monom, raw_coeff in element.items():
        if not raw_coeff:
            # Zero coefficients would create spurious terms; skip them.
            continue
        result[monom] = convert(raw_coeff, orig_domain)
    return result
| PolyRing |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 29764,
"end": 30306
} | class ____(BaseSerializationTest):
"""
tests more combinations of parameters
"""
def test_nf4_single_safe(self):
self.test_serialization(quant_type="nf4", double_quant=False)
# nf4 double safetensors quantization is tested in test_serialization() method from the parent class
def test_fp4_single_safe(self):
self.test_serialization(quant_type="fp4", double_quant=False)
def test_fp4_double_safe(self):
self.test_serialization(quant_type="fp4", double_quant=True)
| ExtendedSerializationTest |
python | huggingface__transformers | src/transformers/models/eomt/modeling_eomt.py | {
"start": 45545,
"end": 54055
} | class ____(EomtPreTrainedModel):
main_input_name = "pixel_values"
def __init__(self, config: EomtConfig):
super().__init__(config)
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.embeddings = EomtEmbeddings(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.query = nn.Embedding(config.num_queries, config.hidden_size)
self.layers = nn.ModuleList([EomtLayer(config) for _ in range(config.num_hidden_layers)])
self.upscale_block = EomtScaleBlock(config)
self.mask_head = EomtMaskHead(config)
self.class_predictor = nn.Linear(config.hidden_size, config.num_labels + 1)
self.grid_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.weight_dict: dict[str, float] = {
"loss_cross_entropy": config.class_weight,
"loss_mask": config.mask_weight,
"loss_dice": config.dice_weight,
}
self.criterion = EomtLoss(config=config, weight_dict=self.weight_dict)
self.register_buffer("attn_mask_probs", torch.ones(config.num_blocks))
self.post_init()
def get_loss_dict(
self,
masks_queries_logits: Tensor,
class_queries_logits: Tensor,
mask_labels: Tensor,
class_labels: Tensor,
auxiliary_predictions: dict[str, Tensor],
) -> dict[str, Tensor]:
loss_dict: dict[str, Tensor] = self.criterion(
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
mask_labels=mask_labels,
class_labels=class_labels,
auxiliary_predictions=auxiliary_predictions,
)
# weight each loss by `self.weight_dict[<LOSS_NAME>]` including auxiliary losses
for key, weight in self.weight_dict.items():
for loss_key, loss in loss_dict.items():
if key in loss_key:
loss *= weight
return loss_dict
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
return sum(loss_dict.values())
@check_model_inputs()
@auto_docstring
def forward(
self,
pixel_values: Tensor,
mask_labels: Optional[list[Tensor]] = None,
class_labels: Optional[list[Tensor]] = None,
patch_offsets: Optional[list[Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> EomtForUniversalSegmentationOutput:
r"""
mask_labels (`list[torch.Tensor]`, *optional*):
list of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`.
patch_offsets (`list[torch.Tensor]`, *optional*):
list of tuples indicating the image index and start and end positions of patches for semantic segmentation.
"""
masks_queries_logits_per_layer, class_queries_logits_per_layer = (), ()
attention_mask = None
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
for idx, layer_module in enumerate(self.layers):
if idx == self.num_hidden_layers - self.config.num_blocks:
query = self.query.weight[None, :, :].expand(hidden_states.shape[0], -1, -1).to(hidden_states.device)
hidden_states = torch.cat((query, hidden_states), dim=1)
if idx >= self.num_hidden_layers - self.config.num_blocks and (
self.training or self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks] > 0
):
norm_hidden_states = self.layernorm(hidden_states)
masks_queries_logits, class_queries_logits = self.predict(norm_hidden_states)
masks_queries_logits_per_layer += (masks_queries_logits,)
class_queries_logits_per_layer += (class_queries_logits,)
attention_mask = torch.ones(
hidden_states.shape[0],
hidden_states.shape[1],
hidden_states.shape[1],
device=hidden_states.device,
dtype=torch.bool,
)
interpolated_logits = F.interpolate(masks_queries_logits, size=self.grid_size, mode="bilinear")
interpolated_logits = interpolated_logits.view(
interpolated_logits.size(0), interpolated_logits.size(1), -1
)
num_query_tokens = self.config.num_queries
encoder_start_tokens = num_query_tokens + self.embeddings.num_prefix_tokens
# Set attention mask for queries to focus on encoder tokens based on interpolated logits
attention_mask[:, :num_query_tokens, encoder_start_tokens:] = interpolated_logits > 0
# Disable attention mask for random query tokens.
attention_mask = self._disable_attention_mask(
attention_mask,
prob=self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks],
num_query_tokens=num_query_tokens,
encoder_start_tokens=encoder_start_tokens,
device=attention_mask.device,
)
# Expand attention mask to 4d mask.
attention_mask = attention_mask[:, None, ...].expand(-1, self.config.num_attention_heads, -1, -1)
attention_mask = attention_mask.float().masked_fill(~attention_mask, -1e9)
hidden_states = layer_module(hidden_states, attention_mask)
sequence_output = self.layernorm(hidden_states)
masks_queries_logits, class_queries_logits = self.predict(sequence_output)
masks_queries_logits_per_layer += (masks_queries_logits,)
class_queries_logits_per_layer += (class_queries_logits,)
loss = None
if mask_labels is not None and class_labels is not None:
loss = 0.0
for masks_queries_logits, class_queries_logits in zip(
masks_queries_logits_per_layer, class_queries_logits_per_layer
):
loss_dict = self.get_loss_dict(
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
mask_labels=mask_labels,
class_labels=class_labels,
auxiliary_predictions=None,
)
loss += self.get_loss(loss_dict)
return EomtForUniversalSegmentationOutput(
loss=loss,
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
last_hidden_state=sequence_output,
patch_offsets=patch_offsets,
)
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def predict(self, logits: torch.Tensor):
query_tokens = logits[:, : self.config.num_queries, :]
class_logits = self.class_predictor(query_tokens)
prefix_tokens = logits[:, self.config.num_queries + self.embeddings.num_prefix_tokens :, :]
prefix_tokens = prefix_tokens.transpose(1, 2)
prefix_tokens = prefix_tokens.reshape(prefix_tokens.shape[0], -1, *self.grid_size)
query_tokens = self.mask_head(query_tokens)
prefix_tokens = self.upscale_block(prefix_tokens)
mask_logits = torch.einsum("bqc, bchw -> bqhw", query_tokens, prefix_tokens)
return mask_logits, class_logits
@staticmethod
def _disable_attention_mask(attn_mask, prob, num_query_tokens, encoder_start_tokens, device):
if prob < 1:
# Generate random queries to disable based on the probs
random_queries = torch.rand(attn_mask.shape[0], num_query_tokens, device=device) > prob
# Disable attention to the query tokens, considering the prefix tokens
attn_mask[:, :num_query_tokens, encoder_start_tokens:][random_queries] = 1
return attn_mask
__all__ = ["EomtPreTrainedModel", "EomtForUniversalSegmentation"]
| EomtForUniversalSegmentation |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_health/asset_check_health.py | {
"start": 9202,
"end": 9334
} | class ____:
num_warning_checks: int
total_num_checks: int
@whitelist_for_serdes
@record.record
| AssetHealthCheckWarningMetadata |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 124197,
"end": 129764
} | class ____(OneFormerPreTrainedModel):
main_input_name = ["pixel_values", "task_inputs"]
def __init__(self, config: OneFormerConfig):
super().__init__(config)
self.pixel_level_module = OneFormerPixelLevelModule(config)
self.transformer_module = OneFormerTransformerModule(in_features=config.conv_dim, config=config)
self.task_encoder = OneFormerTaskModel(config)
self.is_training = config.is_training
if self.is_training:
self.text_mapper = OneFormerTextMapper(config)
else:
self.text_mapper = None
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Tensor,
task_inputs: Tensor,
text_inputs: Optional[Tensor] = None,
pixel_mask: Optional[Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> OneFormerModelOutput:
r"""
task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. See [`OneFormerProcessor.__call__`]
for details.
text_inputs (`list[torch.Tensor]`, *optional*):
Tensor of shape `(num_queries, sequence_length)` to be fed to a model
Example:
```python
>>> import torch
>>> from PIL import Image
>>> import requests
>>> from transformers import OneFormerProcessor, OneFormerModel
>>> # download texting image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # load processor for preprocessing the inputs
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> mask_predictions = outputs.transformer_decoder_mask_predictions
>>> class_predictions = outputs.transformer_decoder_class_predictions
>>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}"
'👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]'
```"""
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, _, height, width = pixel_values.shape
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states)
multi_scale_features = pixel_level_module_output.decoder_features
mask_features = pixel_level_module_output.decoder_last_feature
task_token = self.task_encoder(task_inputs.to(self.dtype))
if self.is_training:
text_queries = self.text_mapper(text_inputs)
else:
text_queries = None
transformer_module_output = self.transformer_module(
multi_scale_features=multi_scale_features,
mask_features=mask_features,
task_token=task_token,
output_attentions=output_attentions,
)
queries = transformer_module_output.object_queries
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
if output_hidden_states:
encoder_hidden_states = pixel_level_module_output.encoder_features
pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,)
for f in pixel_level_module_output.decoder_features:
pixel_decoder_hidden_states += (f,)
transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions
output = OneFormerModelOutput(
encoder_hidden_states=encoder_hidden_states,
pixel_decoder_hidden_states=pixel_decoder_hidden_states,
transformer_decoder_hidden_states=transformer_decoder_hidden_states,
transformer_decoder_object_queries=queries,
transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits,
transformer_decoder_mask_predictions=transformer_module_output.prediction_masks,
transformer_decoder_class_predictions=transformer_module_output.prediction_class,
transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions,
text_queries=text_queries,
task_token=task_token,
attentions=transformer_module_output.attentions,
)
if not return_dict:
output = tuple(v for v in output.values())
return output
@auto_docstring(
custom_intro="""
OneFormer Model for instance, semantic and panoptic image segmentation.
"""
)
| OneFormerModel |
python | langchain-ai__langchain | libs/core/langchain_core/structured_query.py | {
"start": 2295,
"end": 2668
} | class ____(BaseModel):
"""Base class for all expressions."""
def accept(self, visitor: Visitor) -> Any:
"""Accept a visitor.
Args:
visitor: visitor to accept.
Returns:
result of visiting.
"""
return getattr(visitor, f"visit_{_to_snake_case(self.__class__.__name__)}")(
self
)
| Expr |
python | cython__cython | Cython/Compiler/FlowControl.py | {
"start": 23006,
"end": 50821
} | class ____(CythonTransform):
def find_in_stack(self, env):
if env == self.env:
return self.flow
for e, flow in reversed(self.stack):
if e is env:
return flow
assert False
def visit_ModuleNode(self, node):
dot_output = self.current_directives['control_flow.dot_output']
self.gv_ctx = GVContext() if dot_output else None
from .Optimize import ConstantFolding
self.constant_folder = ConstantFolding()
# Set of NameNode reductions
self.reductions = set()
self.in_inplace_assignment = False
self.env = node.scope
self.flow = ControlFlow()
self.stack = [] # a stack of (env, flow) tuples
self.object_expr = TypedExprNode(PyrexTypes.py_object_type, may_be_none=True)
self.visitchildren(node)
check_definitions(self.flow, self.current_directives)
if dot_output:
annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
with open(dot_output, 'w') as fp:
self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
return node
def visit_FuncDefNode(self, node):
for arg in node.args:
if arg.default:
self.visitchildren(arg)
self.visitchildren(node, ('decorators',))
self.stack.append((self.env, self.flow))
self.env = node.local_scope
self.flow = ControlFlow()
# Collect all entries
for entry in node.local_scope.entries.values():
if self.flow.is_tracked(entry):
self.flow.entries.add(entry)
self.mark_position(node)
# Function body block
self.flow.nextblock()
for arg in node.args:
self._visit(arg)
if node.star_arg:
self.flow.mark_argument(node.star_arg,
TypedExprNode(Builtin.tuple_type,
may_be_none=False),
node.star_arg.entry)
if node.starstar_arg:
self.flow.mark_argument(node.starstar_arg,
TypedExprNode(Builtin.dict_type,
may_be_none=False),
node.starstar_arg.entry)
self._visit(node.body)
# Workaround for generators
if node.is_generator:
self._visit(node.gbody.body)
# Exit point
if self.flow.block:
self.flow.block.add_child(self.flow.exit_point)
# Cleanup graph
self.flow.normalize()
check_definitions(self.flow, self.current_directives)
self.flow.blocks.add(self.flow.entry_point)
if self.gv_ctx is not None:
self.gv_ctx.add(GV(node.local_scope.name, self.flow))
self.env, self.flow = self.stack.pop()
return node
def visit_DefNode(self, node):
node.used = True
return self.visit_FuncDefNode(node)
def visit_GeneratorBodyDefNode(self, node):
return node
def visit_CTypeDefNode(self, node):
return node
def mark_assignment(self, lhs, rhs=None, rhs_scope=None):
if not self.flow.block:
return
if self.flow.exceptions:
exc_descr = self.flow.exceptions[-1]
self.flow.block.add_child(exc_descr.entry_point)
self.flow.nextblock()
if not rhs:
rhs = self.object_expr
if lhs.is_name:
if lhs.entry is not None:
entry = lhs.entry
else:
entry = self.env.lookup(lhs.name)
if entry is None: # TODO: This shouldn't happen...
return
self.flow.mark_assignment(lhs, rhs, entry, rhs_scope=rhs_scope)
elif lhs.is_sequence_constructor:
for i, arg in enumerate(lhs.args):
if arg.is_starred:
# "a, *b = x" assigns a list to "b"
item_node = TypedExprNode(Builtin.list_type, may_be_none=False, pos=arg.pos)
elif rhs is self.object_expr:
item_node = rhs
else:
item_node = rhs.inferable_item_node(i)
self.mark_assignment(arg, item_node, rhs_scope=rhs_scope)
else:
self._visit(lhs)
if self.flow.exceptions:
exc_descr = self.flow.exceptions[-1]
self.flow.block.add_child(exc_descr.entry_point)
self.flow.nextblock()
def mark_position(self, node):
"""Mark position if DOT output is enabled."""
if self.current_directives['control_flow.dot_output']:
self.flow.mark_position(node)
def visit_FromImportStatNode(self, node):
for name, target in node.items:
if name != "*":
self.mark_assignment(target)
self.visitchildren(node)
return node
def visit_AssignmentNode(self, node):
raise InternalError("Unhandled assignment node %s" % type(node))
def visit_SingleAssignmentNode(self, node):
self._visit(node.rhs)
self.mark_assignment(node.lhs, node.rhs)
return node
def visit_CascadedAssignmentNode(self, node):
self._visit(node.rhs)
for lhs in node.lhs_list:
self.mark_assignment(lhs, node.rhs)
return node
def visit_ParallelAssignmentNode(self, node):
collector = AssignmentCollector()
collector.visitchildren(node)
for lhs, rhs in collector.assignments:
self._visit(rhs)
for lhs, rhs in collector.assignments:
self.mark_assignment(lhs, rhs)
return node
def visit_InPlaceAssignmentNode(self, node):
self.in_inplace_assignment = True
self.visitchildren(node)
self.in_inplace_assignment = False
self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node()))
return node
def visit_DelStatNode(self, node):
for arg in node.args:
if arg.is_name:
entry = arg.entry or self.env.lookup(arg.name)
if entry.in_closure or entry.from_closure:
error(arg.pos,
"can not delete variable '%s' "
"referenced in nested scope" % entry.name)
if not node.ignore_nonexisting:
self._visit(arg) # mark reference
self.flow.mark_deletion(arg, entry)
else:
self._visit(arg)
return node
def visit_CArgDeclNode(self, node):
entry = self.env.lookup(node.name)
if entry:
may_be_none = not node.not_none
self.flow.mark_argument(
node, TypedExprNode(entry.type, may_be_none), entry)
return node
def visit_NameNode(self, node):
if self.flow.block:
entry = node.entry or self.env.lookup(node.name)
if entry:
self.flow.mark_reference(node, entry)
if entry in self.reductions and not self.in_inplace_assignment:
error(node.pos,
"Cannot read reduction variable in loop body")
return node
def visit_StatListNode(self, node):
if self.flow.block:
for stat in node.stats:
self._visit(stat)
if not self.flow.block:
stat.is_terminator = True
break
return node
def visit_Node(self, node):
self.visitchildren(node)
self.mark_position(node)
return node
def visit_SizeofVarNode(self, node):
return node
def visit_TypeidNode(self, node):
return node
def visit_IfStatNode(self, node):
next_block = self.flow.newblock()
parent = self.flow.block
# If clauses
for clause in node.if_clauses:
parent = self.flow.nextblock(parent)
self._visit(clause.condition)
self.flow.nextblock()
self._visit(clause.body)
if self.flow.block:
self.flow.block.add_child(next_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=parent)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
parent.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_AssertStatNode(self, node):
"""Essentially an if-condition that wraps a RaiseStatNode.
"""
self.mark_position(node)
next_block = self.flow.newblock()
parent = self.flow.block
# failure case
parent = self.flow.nextblock(parent)
self._visit(node.condition)
self.flow.nextblock()
self._visit(node.exception)
if self.flow.block:
self.flow.block.add_child(next_block)
parent.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_WhileStatNode(self, node):
condition_block = self.flow.nextblock()
next_block = self.flow.newblock()
# Condition block
self.flow.loops.append(LoopDescr(next_block, condition_block))
if node.condition:
self._visit(node.condition)
# Body block
self.flow.nextblock()
self._visit(node.body)
self.flow.loops.pop()
# Loop it
if self.flow.block:
self.flow.block.add_child(condition_block)
self.flow.block.add_child(next_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=condition_block)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
condition_block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def mark_forloop_target(self, node):
# TODO: Remove redundancy with range optimization...
is_special = False
sequence = node.iterator.sequence
target = node.target
env = node.iterator.expr_scope or self.env
if isinstance(sequence, ExprNodes.SimpleCallNode):
function = sequence.function
if sequence.self is None and function.is_name:
entry = env.lookup(function.name)
if not entry or entry.is_builtin:
if function.name == 'reversed' and len(sequence.args) == 1:
sequence = sequence.args[0]
elif function.name == 'enumerate' and len(sequence.args) == 1:
if target.is_sequence_constructor and len(target.args) == 2:
iterator = sequence.args[0]
if iterator.is_name:
iterator_type = iterator.infer_type(env)
if iterator_type.is_builtin_type:
# assume that builtin types have a length within Py_ssize_t
self.mark_assignment(
target.args[0],
ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type),
rhs_scope=node.iterator.expr_scope)
target = target.args[1]
sequence = sequence.args[0]
if isinstance(sequence, ExprNodes.SimpleCallNode):
function = sequence.function
if sequence.self is None and function.is_name and function.name in ('range', 'xrange'):
entry = env.lookup(function.name)
if entry and entry.is_type and entry.type is Builtin.range_type:
is_special = True
for arg in sequence.args[:2]:
self.mark_assignment(target, arg, rhs_scope=node.iterator.expr_scope)
if len(sequence.args) > 2:
self.mark_assignment(target, self.constant_folder(
ExprNodes.binop_node(node.pos,
'+',
sequence.args[0],
sequence.args[2])),
rhs_scope=node.iterator.expr_scope)
if not is_special:
# A for-loop basically translates to subsequent calls to
# __getitem__(), so using an IndexNode here allows us to
# naturally infer the base type of pointers, C arrays,
# Python strings, etc., while correctly falling back to an
# object type when the base type cannot be handled.
self.mark_assignment(target, node.item, rhs_scope=node.iterator.expr_scope)
def mark_parallel_forloop_assignment(self, node):
target = node.target
for arg in node.args[:2]:
self.mark_assignment(target, arg)
if len(node.args) > 2:
self.mark_assignment(target, self.constant_folder(
ExprNodes.binop_node(node.pos,
'+',
node.args[0],
node.args[2])))
if not node.args:
# Almost certainly an error
self.mark_assignment(target)
def visit_AsyncForStatNode(self, node):
return self.visit_ForInStatNode(node)
def visit_ForInStatNode(self, node):
condition_block = self.flow.nextblock()
next_block = self.flow.newblock()
# Condition with iterator
self.flow.loops.append(LoopDescr(next_block, condition_block))
self._visit(node.iterator)
# Target assignment
self.flow.nextblock()
if isinstance(node, Nodes.ForInStatNode):
self.mark_forloop_target(node)
elif isinstance(node, Nodes.AsyncForStatNode):
# not entirely correct, but good enough for now
self.mark_assignment(node.target, node.item)
elif isinstance(node, Nodes.ParallelRangeNode): # Parallel
self.mark_parallel_forloop_assignment(node)
else:
assert False, type(node)
# Body block
self.flow.nextblock()
self._visit(node.body)
self.flow.loops.pop()
# Loop it
if self.flow.block:
self.flow.block.add_child(condition_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=condition_block)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
condition_block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def _delete_privates(self, node, exclude=None):
for private_node in node.assigned_nodes:
if not exclude or private_node.entry is not exclude:
self.flow.mark_deletion(private_node, private_node.entry)
def visit_ParallelRangeNode(self, node):
reductions = self.reductions
# if node.target is None or not a NameNode, an error will have
# been previously issued
if hasattr(node.target, 'entry'):
self.reductions = set(reductions)
for private_node in node.assigned_nodes:
private_node.entry.error_on_uninitialized = True
pos, reduction = node.assignments[private_node.entry]
if reduction:
self.reductions.add(private_node.entry)
node = self.visit_ForInStatNode(node)
self.reductions = reductions
return node
def visit_ParallelWithBlockNode(self, node):
for private_node in node.assigned_nodes:
private_node.entry.error_on_uninitialized = True
self.visitchildren(node)
# lastprivate isn't allowed/doesn't make sense for a parallel (non-for) block
self._delete_privates(node)
return node
def visit_ForFromStatNode(self, node):
condition_block = self.flow.nextblock()
next_block = self.flow.newblock()
# Condition with iterator
self.flow.loops.append(LoopDescr(next_block, condition_block))
self._visit(node.bound1)
self._visit(node.bound2)
if node.step is not None:
self._visit(node.step)
# Target assignment
self.flow.nextblock()
self.mark_assignment(node.target, node.bound1)
if node.step is not None:
self.mark_assignment(node.target, self.constant_folder(
ExprNodes.binop_node(node.pos, '+', node.bound1, node.step)))
# Body block
self.flow.nextblock()
self._visit(node.body)
self.flow.loops.pop()
# Loop it
if self.flow.block:
self.flow.block.add_child(condition_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=condition_block)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
condition_block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_LoopNode(self, node):
raise InternalError("Generic loops are not supported")
def visit_WithTargetAssignmentStatNode(self, node):
self.mark_assignment(node.lhs, node.with_node.enter_call)
return node
def visit_WithStatNode(self, node):
self._visit(node.manager)
self._visit(node.enter_call)
self._visit(node.body)
return node
def visit_TryExceptStatNode(self, node):
# After exception handling
next_block = self.flow.newblock()
# Body block
self.flow.newblock()
# Exception entry point
entry_point = self.flow.newblock()
self.flow.exceptions.append(ExceptionDescr(entry_point))
self.flow.nextblock()
## XXX: links to exception handling point should be added by
## XXX: children nodes
self.flow.block.add_child(entry_point)
self.flow.nextblock()
self.flow.in_try_block += 1
self._visit(node.body)
self.flow.in_try_block -= 1
self.flow.exceptions.pop()
# After exception
if self.flow.block:
if node.else_clause:
self.flow.nextblock()
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
for clause in node.except_clauses:
self.flow.block = entry_point
if clause.pattern:
for pattern in clause.pattern:
self._visit(pattern)
else:
# TODO: handle * pattern
pass
entry_point = self.flow.newblock(parent=self.flow.block)
self.flow.nextblock()
if clause.target:
self.mark_assignment(clause.target, clause.exc_value)
self._visit(clause.body)
if self.flow.block:
self.flow.block.add_child(next_block)
if self.flow.exceptions:
entry_point.add_child(self.flow.exceptions[-1].entry_point)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_TryFinallyStatNode(self, node):
body_block = self.flow.nextblock()
# Exception entry point
entry_point = self.flow.newblock()
self.flow.block = entry_point
self._visit(node.finally_except_clause)
if self.flow.block and self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
# Normal execution
finally_enter = self.flow.newblock()
self.flow.block = finally_enter
self._visit(node.finally_clause)
finally_exit = self.flow.block
descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
self.flow.exceptions.append(descr)
if self.flow.loops:
self.flow.loops[-1].exceptions.append(descr)
self.flow.block = body_block
body_block.add_child(entry_point)
self.flow.nextblock()
self.flow.in_try_block += 1
self._visit(node.body)
self.flow.in_try_block -= 1
self.flow.exceptions.pop()
if self.flow.loops:
self.flow.loops[-1].exceptions.pop()
if self.flow.block:
self.flow.block.add_child(finally_enter)
if finally_exit:
self.flow.block = self.flow.nextblock(parent=finally_exit)
else:
self.flow.block = None
return node
def visit_RaiseStatNode(self, node):
self.mark_position(node)
self.visitchildren(node)
if self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
self.flow.block = None
if self.flow.in_try_block:
node.in_try_block = True
return node
def visit_ReraiseStatNode(self, node):
self.mark_position(node)
if self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
self.flow.block = None
return node
def visit_ReturnStatNode(self, node):
self.mark_position(node)
self.visitchildren(node)
outer_exception_handlers = iter(self.flow.exceptions[::-1])
for handler in outer_exception_handlers:
if handler.finally_enter:
self.flow.block.add_child(handler.finally_enter)
if handler.finally_exit:
# 'return' goes to function exit, or to the next outer 'finally' clause
exit_point = self.flow.exit_point
for next_handler in outer_exception_handlers:
if next_handler.finally_enter:
exit_point = next_handler.finally_enter
break
handler.finally_exit.add_child(exit_point)
break
else:
if self.flow.block:
self.flow.block.add_child(self.flow.exit_point)
self.flow.block = None
return node
def visit_BreakStatNode(self, node):
if not self.flow.loops:
#error(node.pos, "break statement not inside loop")
return node
loop = self.flow.loops[-1]
self.mark_position(node)
for exception in loop.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(loop.next_block)
break
else:
self.flow.block.add_child(loop.next_block)
self.flow.block = None
return node
def visit_ContinueStatNode(self, node):
if not self.flow.loops:
#error(node.pos, "continue statement not inside loop")
return node
loop = self.flow.loops[-1]
self.mark_position(node)
for exception in loop.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(loop.loop_block)
break
else:
self.flow.block.add_child(loop.loop_block)
self.flow.block = None
return node
def visit_ComprehensionNode(self, node):
if node.expr_scope:
self.stack.append((self.env, self.flow))
self.env = node.expr_scope
# Skip append node here
self._visit(node.loop)
if node.expr_scope:
self.env, _ = self.stack.pop()
return node
def visit_ScopedExprNode(self, node):
# currently this is written to deal with these two types
# (with comprehensions covered in their own function)
assert isinstance(node, (ExprNodes.IteratorNode, ExprNodes.AsyncIteratorNode)), node
if node.expr_scope:
self.stack.append((self.env, self.flow))
self.flow = self.find_in_stack(node.expr_scope)
self.env = node.expr_scope
self.visitchildren(node)
if node.expr_scope:
self.env, self.flow = self.stack.pop()
return node
def visit_PyClassDefNode(self, node):
self.visitchildren(node, ('dict', 'metaclass',
'mkw', 'bases', 'class_result'))
self.flow.mark_assignment(node.target, node.classobj,
self.env.lookup(node.target.name))
self.stack.append((self.env, self.flow))
self.env = node.scope
self.flow.nextblock()
if node.doc_node:
self.flow.mark_assignment(node.doc_node, fake_rhs_expr, node.doc_node.entry)
self.visitchildren(node, ('body',))
self.flow.nextblock()
self.env, _ = self.stack.pop()
return node
def visit_CClassDefNode(self, node):
# just make sure the nodes scope is findable in-case there is a list comprehension in it
self.stack.append((node.scope, self.flow))
self.visitchildren(node)
self.stack.pop()
return node
def visit_AmpersandNode(self, node):
if node.operand.is_name:
# Fake assignment to silence warning
self.mark_assignment(node.operand, fake_rhs_expr)
self.visitchildren(node)
return node
def visit_BoolBinopNode(self, node):
# Note - I don't believe BoolBinopResultNode needs special handling beyond this
assert len(node.subexprs) == 2 # operand1 and operand2 only
next_block = self.flow.newblock()
parent = self.flow.block
self._visit(node.operand1)
self.flow.nextblock()
self._visit(node.operand2)
if self.flow.block:
self.flow.block.add_child(next_block)
parent.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_CondExprNode(self, node):
assert len(node.subexprs) == 3
self._visit(node.test)
parent = self.flow.block
next_block = self.flow.newblock()
self.flow.nextblock()
self._visit(node.true_val)
if self.flow.block:
self.flow.block.add_child(next_block)
self.flow.nextblock(parent=parent)
self._visit(node.false_val)
if self.flow.block:
self.flow.block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
| ControlFlowAnalysis |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/legacy/valid_asset_subset.py | {
"start": 766,
"end": 6519
} | class ____(SerializableEntitySubset[AssetKey]):
"""Legacy construct used for doing operations over EntitySubsets that are known to be valid. This
functionality is subsumed by EntitySubset.
"""
def inverse(self, partitions_def: Optional[PartitionsDefinition]) -> "ValidAssetSubset":
"""Returns the EntitySubset containing all asset partitions which are not in this EntitySubset."""
if partitions_def is None:
return replace(self, value=not self.bool_value)
else:
value = partitions_def.subset_with_partition_keys(
self.subset_value.get_partition_keys_not_in_subset(partitions_def)
)
return replace(self, value=value)
def _oper(self, other: "ValidAssetSubset", oper: Callable[..., Any]) -> "ValidAssetSubset":
value = oper(self.value, other.value)
return replace(self, value=value)
def __sub__(self, other: SerializableEntitySubset) -> "ValidAssetSubset":
"""Returns an EntitySubset representing self.asset_partitions - other.asset_partitions."""
valid_other = self.get_valid(other)
if not self.is_partitioned:
return replace(self, value=self.bool_value and not valid_other.bool_value)
return self._oper(valid_other, operator.sub)
def __and__(self, other: SerializableEntitySubset) -> "ValidAssetSubset":
"""Returns an EntitySubset representing self.asset_partitions & other.asset_partitions."""
return self._oper(self.get_valid(other), operator.and_)
def __or__(self, other: SerializableEntitySubset) -> "ValidAssetSubset":
"""Returns an EntitySubset representing self.asset_partitions | other.asset_partitions."""
return self._oper(self.get_valid(other), operator.or_)
@staticmethod
def coerce_from_subset(
subset: SerializableEntitySubset, partitions_def: Optional[PartitionsDefinition]
) -> "ValidAssetSubset":
"""Converts an EntitySubset to a ValidAssetSubset by returning a copy of this EntitySubset
if it is compatible with the given PartitionsDefinition, otherwise returns an empty subset.
"""
if subset.is_compatible_with_partitions_def(partitions_def):
return ValidAssetSubset(key=subset.key, value=subset.value)
else:
return ValidAssetSubset.empty(subset.key, partitions_def)
def _is_compatible_with_subset(self, other: "SerializableEntitySubset") -> bool:
if isinstance(other.value, (TimeWindowPartitionsSubset, AllPartitionsSubset)):
return self.is_compatible_with_partitions_def(other.value.partitions_def)
else:
return self.is_partitioned == other.is_partitioned
def get_valid(self, other: SerializableEntitySubset) -> "ValidAssetSubset":
"""Creates a ValidAssetSubset from the given EntitySubset by returning a replace of the given
EntitySubset if it is compatible with this EntitySubset, otherwise returns an empty subset.
"""
if isinstance(other, ValidAssetSubset):
return other
elif self._is_compatible_with_subset(other):
return ValidAssetSubset(key=other.key, value=other.value)
else:
return replace(
self,
# unfortunately, this is the best way to get an empty partitions subset of an unknown
# type if you don't have access to the partitions definition
value=(self.subset_value - self.subset_value) if self.is_partitioned else False,
)
@staticmethod
def all(
asset_key: AssetKey, partitions_def: Optional[PartitionsDefinition]
) -> "ValidAssetSubset":
if partitions_def is None:
return ValidAssetSubset(key=asset_key, value=True)
else:
with partition_loading_context() as ctx:
return ValidAssetSubset(
key=asset_key, value=AllPartitionsSubset(partitions_def, ctx)
)
@staticmethod
def empty(
asset_key: AssetKey, partitions_def: Optional[PartitionsDefinition]
) -> "ValidAssetSubset":
if partitions_def is None:
return ValidAssetSubset(key=asset_key, value=False)
else:
return ValidAssetSubset(key=asset_key, value=partitions_def.empty_subset())
@staticmethod
def from_asset_partitions_set(
asset_key: AssetKey,
partitions_def: Optional[PartitionsDefinition],
asset_partitions_set: AbstractSet[AssetKeyPartitionKey],
) -> "ValidAssetSubset":
return (
ValidAssetSubset.from_partition_keys(
asset_key=asset_key,
partitions_def=partitions_def,
partition_keys={
ap.partition_key for ap in asset_partitions_set if ap.partition_key is not None
},
)
if partitions_def
else ValidAssetSubset(key=asset_key, value=bool(asset_partitions_set))
)
@staticmethod
def from_partition_keys(
asset_key: AssetKey,
partitions_def: PartitionsDefinition,
partition_keys: AbstractSet[str],
) -> "ValidAssetSubset":
return ValidAssetSubset(
key=asset_key, value=partitions_def.subset_with_partition_keys(partition_keys)
)
@property
def asset_partitions(self) -> AbstractSet[AssetKeyPartitionKey]:
if not self.is_partitioned:
return {AssetKeyPartitionKey(self.key)} if self.bool_value else set()
else:
return {
AssetKeyPartitionKey(self.key, partition_key)
for partition_key in self.subset_value.get_partition_keys()
}
| ValidAssetSubset |
python | huggingface__transformers | tests/models/stablelm/test_modeling_stablelm.py | {
"start": 1391,
"end": 5960
} | class ____(unittest.TestCase):
@slow
def test_model_stablelm_3b_4e1t_logits(self):
input_ids = {"input_ids": torch.tensor([[510, 8588, 310, 1900, 9386]], dtype=torch.long, device=torch_device)}
model = StableLmForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t").to(torch_device)
model.eval()
output = model(**input_ids).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[2.7146, 2.4245, 1.5616, 1.4424, 2.6790]]).to(torch_device)
torch.testing.assert_close(output.mean(dim=-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
# Expected logits sliced from [0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([7.1030, -1.4195, 9.9206, 7.7008, 4.9891, 4.2169, 5.5426, 3.7878, 6.7593, 5.7360, 8.4691, 5.5448, 5.0544, 10.4129, 8.5573, 13.0405, 7.3265, 3.5868, 6.1106, 5.9406, 5.6376, 5.7490, 5.4850, 4.8124, 5.1991, 4.6419, 4.5719, 9.9588, 6.7222, 4.5070]).to(torch_device) # fmt: skip
torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
@slow
def test_model_stablelm_3b_4e1t_generation(self):
tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
model = StableLmForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t")
input_ids = tokenizer.encode(
"My favorite food has always been pizza, but lately",
return_tensors="pt",
)
outputs = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(outputs[0], skip_special_tokens=True)
EXPECTED_TEXT_COMPLETION = """My favorite food has always been pizza, but lately I’ve been craving something different. I’ve been trying to eat healthier and I’ve"""
self.assertEqual(text, EXPECTED_TEXT_COMPLETION)
@slow
def test_model_tiny_random_stablelm_2_logits(self):
# Check parallel residual and qk layernorm forward pass
input_ids = {"input_ids": torch.tensor([[510, 8588, 310, 1900, 9386]], dtype=torch.long, device=torch_device)}
model = StableLmForCausalLM.from_pretrained("stabilityai/tiny-random-stablelm-2").to(torch_device)
model.eval()
output = model(**input_ids).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.7196, -3.6099, -2.6877, -3.1973, -3.9344]]).to(torch_device)
torch.testing.assert_close(output.mean(dim=-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
# Expected logits sliced from [0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([2.8364, 5.3811, 5.1659, 7.5485, 4.3219, 6.3315, 1.3967, 6.9147, 3.9679, 6.4786, 5.9176, 3.3067, 5.2917, 0.1485, 3.9630, 7.9947,10.6727, 9.6757, 8.8772, 8.3527, 7.8445, 6.6025, 5.5786, 7.0985,6.1369, 3.4259, 1.9397, 4.6157, 4.8105, 3.1768]).to(torch_device) # fmt: skip
torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
@slow
def test_model_tiny_random_stablelm_2_generation(self):
# Check parallel residual and qk layernorm generation
tokenizer = AutoTokenizer.from_pretrained("stabilityai/tiny-random-stablelm-2")
model = StableLmForCausalLM.from_pretrained("stabilityai/tiny-random-stablelm-2")
input_ids = tokenizer.encode(
"My favorite ride at the amusement park",
return_tensors="pt",
)
outputs = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(outputs[0], skip_special_tokens=True)
EXPECTED_TEXT_COMPLETION = """My favorite ride at the amusement park is the 2000-mile roller coaster. It's a thrilling ride filled with roller coast"""
self.assertEqual(text, EXPECTED_TEXT_COMPLETION)
@require_bitsandbytes
@slow
@require_flash_attn
@pytest.mark.flash_attn_test
def test_model_3b_long_prompt(self):
EXPECTED_OUTPUT_TOKEN_IDS = [3, 3, 3]
input_ids = [306, 338] * 2047
model = StableLmForCausalLM.from_pretrained(
"stabilityai/stablelm-3b-4e1t",
device_map="auto",
dtype="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
attn_implementation="flash_attention_2",
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-3:].tolist())
| StableLmModelIntegrationTest |
python | huggingface__transformers | src/transformers/models/flava/configuration_flava.py | {
"start": 14809,
"end": 17720
} | class ____(PreTrainedConfig):
model_type = "flava_image_codebook"
base_config_key = "image_codebook_config"
r"""
[`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It
is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_groups (`int`, *optional*, defaults to 4):
Number of groups to be created. This parameter as of now doesn't affect the model and is used for some
internal calculation and estimations.
input_channels (`int`, *optional*, defaults to 3):
Number of channels in the image to be passed.
num_blocks_per_group (`int`, *optional*, defaults to 2):
Number of conv-based blocks per group.
hidden_size (`int`, *optional*, defaults to 256):
Size of hidden dim for the blocks.
vocab_size (`int`, *optional*, defaults to 8192):
Size of the output vocabulary for the codebook.
freeze (`bool`, defaults to `True`):
Whether to freeze the weights of the model.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook
>>> # Initializing a FlavaImageCodebook with style configuration
>>> configuration = FlavaImageCodebookConfig()
>>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration
>>> model = FlavaImageCodebook(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
def __init__(
self,
num_groups: int = 4,
input_channels: int = 3,
num_blocks_per_group: int = 2,
hidden_size: int = 256,
vocab_size: int = 8192,
freeze: int = True,
initializer_range: float = 0.02,
**kwargs,
):
super().__init__(**kwargs)
self.num_groups = num_groups
self.input_channels = input_channels
self.num_blocks_per_group = num_blocks_per_group
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.freeze = freeze
self.initializer_range = initializer_range
| FlavaImageCodebookConfig |
python | fastai__fastai | fastai/text/core.py | {
"start": 4335,
"end": 4602
} | class ____():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
# %% ../../nbs/30_text.core.ipynb 39
| BaseTokenizer |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/type/introspection.py | {
"start": 8178,
"end": 17580
} | class ____(object):
_kinds = (
(GraphQLScalarType, TypeKind.SCALAR),
(GraphQLObjectType, TypeKind.OBJECT),
(GraphQLInterfaceType, TypeKind.INTERFACE),
(GraphQLUnionType, TypeKind.UNION),
(GraphQLEnumType, TypeKind.ENUM),
(GraphQLInputObjectType, TypeKind.INPUT_OBJECT),
(GraphQLList, TypeKind.LIST),
(GraphQLNonNull, TypeKind.NON_NULL),
)
@classmethod
def kind(cls, type, *_):
for klass, kind in cls._kinds:
if isinstance(type, klass):
return kind
raise Exception('Unknown kind of type: {}'.format(type))
@staticmethod
def fields(type, args, *_):
if isinstance(type, (GraphQLObjectType, GraphQLInterfaceType)):
fields = []
include_deprecated = args.get('includeDeprecated')
for field_name, field in type.fields.items():
if field.deprecation_reason and not include_deprecated:
continue
fields.append(Field(
name=field_name,
description=field.description,
type=field.type,
args=field.args,
deprecation_reason=field.deprecation_reason
))
return fields
return None
@staticmethod
def interfaces(type, *_):
if isinstance(type, GraphQLObjectType):
return type.interfaces
@staticmethod
def possible_types(type, args, context, info):
if isinstance(type, (GraphQLInterfaceType, GraphQLUnionType)):
return info.schema.get_possible_types(type)
@staticmethod
def enum_values(type, args, *_):
if isinstance(type, GraphQLEnumType):
values = type.values
if not args.get('includeDeprecated'):
values = [v for v in values if not v.deprecation_reason]
return values
@staticmethod
def input_fields(type, *_):
if isinstance(type, GraphQLInputObjectType):
return input_fields_to_list(type.fields)
__Type = GraphQLObjectType(
'__Type',
description='The fundamental unit of any GraphQL Schema is the type. There are '
'many kinds of types in GraphQL as represented by the `__TypeKind` enum.'
'\n\nDepending on the kind of a type, certain fields describe '
'information about that type. Scalar types provide no information '
'beyond a name and description, while Enum types provide their values. '
'Object and Interface types provide the fields they describe. Abstract '
'types, Union and Interface, provide the Object types possible '
'at runtime. List and NonNull types compose other types.',
fields=lambda: OrderedDict([
('kind', GraphQLField(
type=GraphQLNonNull(__TypeKind),
resolver=TypeFieldResolvers.kind
)),
('name', GraphQLField(GraphQLString)),
('description', GraphQLField(GraphQLString)),
('fields', GraphQLField(
type=GraphQLList(GraphQLNonNull(__Field)),
args={
'includeDeprecated': GraphQLArgument(
GraphQLBoolean,
default_value=False
)
},
resolver=TypeFieldResolvers.fields
)),
('interfaces', GraphQLField(
type=GraphQLList(GraphQLNonNull(__Type)),
resolver=TypeFieldResolvers.interfaces
)),
('possibleTypes', GraphQLField(
type=GraphQLList(GraphQLNonNull(__Type)),
resolver=TypeFieldResolvers.possible_types
)),
('enumValues', GraphQLField(
type=GraphQLList(GraphQLNonNull(__EnumValue)),
args={
'includeDeprecated': GraphQLArgument(
GraphQLBoolean,
default_value=False
)
},
resolver=TypeFieldResolvers.enum_values
)),
('inputFields', GraphQLField(
type=GraphQLList(GraphQLNonNull(__InputValue)),
resolver=TypeFieldResolvers.input_fields
)),
('ofType', GraphQLField(
type=__Type,
resolver=lambda type, *_: getattr(type, 'of_type', None)
)),
]))
__Field = GraphQLObjectType(
'__Field',
description='Object and Interface types are described by a list of Fields, each of '
'which has a name, potentially a list of arguments, and a return type.',
fields=lambda: OrderedDict([
('name', GraphQLField(GraphQLNonNull(GraphQLString))),
('description', GraphQLField(GraphQLString)),
('args', GraphQLField(
type=GraphQLNonNull(GraphQLList(GraphQLNonNull(__InputValue))),
resolver=lambda field, *_: input_fields_to_list(field.args)
)),
('type', GraphQLField(GraphQLNonNull(__Type))),
('isDeprecated', GraphQLField(
type=GraphQLNonNull(GraphQLBoolean),
resolver=lambda field, *_: bool(field.deprecation_reason)
)),
('deprecationReason', GraphQLField(
type=GraphQLString,
resolver=lambda field, *_: field.deprecation_reason
))
])
)
__InputValue = GraphQLObjectType(
'__InputValue',
description='Arguments provided to Fields or Directives and the input fields of an '
'InputObject are represented as Input Values which describe their type '
'and optionally a default value.',
fields=lambda: OrderedDict([
('name', GraphQLField(GraphQLNonNull(GraphQLString))),
('description', GraphQLField(GraphQLString)),
('type', GraphQLField(GraphQLNonNull(__Type))),
('defaultValue', GraphQLField(
type=GraphQLString,
resolver=lambda input_val, *_:
None if input_val.default_value is None
else print_ast(ast_from_value(input_val.default_value, input_val))
))
]))
__EnumValue = GraphQLObjectType(
'__EnumValue',
description='One possible value for a given Enum. Enum values are unique values, not '
'a placeholder for a string or numeric value. However an Enum value is '
'returned in a JSON response as a string.',
fields=lambda: OrderedDict([
('name', GraphQLField(GraphQLNonNull(GraphQLString))),
('description', GraphQLField(GraphQLString)),
('isDeprecated', GraphQLField(
type=GraphQLNonNull(GraphQLBoolean),
resolver=lambda field, *_: bool(field.deprecation_reason)
)),
('deprecationReason', GraphQLField(
type=GraphQLString,
resolver=lambda enum_value, *_: enum_value.deprecation_reason,
))
]))
__TypeKind = GraphQLEnumType(
'__TypeKind',
description='An enum describing what kind of type a given `__Type` is',
values=OrderedDict([
('SCALAR', GraphQLEnumValue(
TypeKind.SCALAR,
description='Indicates this type is a scalar.'
)),
('OBJECT', GraphQLEnumValue(
TypeKind.OBJECT,
description='Indicates this type is an object. '
'`fields` and `interfaces` are valid fields.'
)),
('INTERFACE', GraphQLEnumValue(
TypeKind.INTERFACE,
description='Indicates this type is an interface. '
'`fields` and `possibleTypes` are valid fields.'
)),
('UNION', GraphQLEnumValue(
TypeKind.UNION,
description='Indicates this type is a union. '
'`possibleTypes` is a valid field.'
)),
('ENUM', GraphQLEnumValue(
TypeKind.ENUM,
description='Indicates this type is an enum. '
'`enumValues` is a valid field.'
)),
('INPUT_OBJECT', GraphQLEnumValue(
TypeKind.INPUT_OBJECT,
description='Indicates this type is an input object. '
'`inputFields` is a valid field.'
)),
('LIST', GraphQLEnumValue(
TypeKind.LIST,
description='Indicates this type is a list. '
'`ofType` is a valid field.'
)),
('NON_NULL', GraphQLEnumValue(
TypeKind.NON_NULL,
description='Indicates this type is a non-null. '
'`ofType` is a valid field.'
)),
]))
IntrospectionSchema = __Schema
SchemaMetaFieldDef = GraphQLField(
# name='__schema',
type=GraphQLNonNull(__Schema),
description='Access the current type schema of this server.',
resolver=lambda source, args, context, info: info.schema,
args={}
)
TypeMetaFieldDef = GraphQLField(
type=__Type,
# name='__type',
description='Request the type information of a single type.',
args={'name': GraphQLArgument(GraphQLNonNull(GraphQLString))},
resolver=lambda source, args, context, info: info.schema.get_type(args['name'])
)
TypeNameMetaFieldDef = GraphQLField(
type=GraphQLNonNull(GraphQLString),
# name='__typename',
description='The name of the current Object type at runtime.',
resolver=lambda source, args, context, info: info.parent_type.name,
args={}
)
| TypeFieldResolvers |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore.py | {
"start": 4060,
"end": 16229
} | class ____(unittest.TestCase):
class FailureInjectorCallback(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(self, num_trials=20, delay_s=0.3):
self.num_trials = num_trials
self.delay_s = delay_s
self.fail_at = None
def on_step_end(self, trials, **kwargs):
if self.fail_at:
if time.monotonic() >= self.fail_at:
raise RuntimeError(f"Failing after {self.delay_s}")
return
if len(trials) >= self.num_trials:
print(
f"Reached {self.num_trials} trials. "
f"Scheduling failure in {self.delay_s} seconds."
)
self.fail_at = time.monotonic() + self.delay_s
class CheckStateCallback(Callback):
"""Checks state for the experiment initialization."""
def __init__(self, expected_trials=20):
self.expected_trials = expected_trials
self._checked = False
def on_step_begin(self, iteration, trials, **kwargs):
if not self._checked:
assert len(trials) == self.expected_trials
self._checked = True
class CheckTrialResourcesCallback(Callback):
"""Checks if pending trials are requesting the right amount of
resources.
The check happens exactly once after `check_after` number of calls
to on_step_begin(). Note, we deliberately delay the check to after
`check_after` number of steps. This is because when we start a
tuning job from fresh (rather than restored), trial list is still
empty - any check now would be trivial and thus wasted.
"""
def __init__(self, expected_cpu: int, check_after: int = 1):
self._expected_cpu = expected_cpu
self._checked = False
self._check_after = check_after
def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
if not self._checked and iteration >= self._check_after:
for trial in trials:
if trial.status == Trial.PENDING:
assert (
trial.placement_group_factory.required_resources.get(
"CPU", 0
)
== self._expected_cpu
)
self._checked = True
def setUp(self):
self.logdir = tempfile.mkdtemp()
# These tests need driver syncing to happen before the crash happens
# so that they can pick up from the *exact* state it left off at.
# We do this by failing after a delay of 0.3s > TUNE_GLOBAL_CHECKPOINT_S
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0.1"
# Change back to local_mode=True after this is resolved:
# https://github.com/ray-project/ray/issues/13932
ray.init(local_mode=False, num_cpus=2)
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
def tearDown(self):
os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
os.environ.pop("TUNE_MAX_PENDING_TRIALS_PG", None)
shutil.rmtree(self.logdir)
ray.shutdown()
def testFailResumeGridSearch(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
name="testFailResumeGridSearch",
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run("trainable", callbacks=[self.FailureInjectorCallback()], **config)
analysis = tune.run(
"trainable", resume=True, callbacks=[self.CheckStateCallback()], **config
)
assert len(analysis.trials) == 27
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert all(v == 9 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert all(v == 9 for v in test2_counter.values())
# Unfinished trials' resources should be updated.
def testResourceUpdateInResume(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
name="testResourceUpdateInResume",
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[
self.FailureInjectorCallback(),
self.CheckTrialResourcesCallback(1),
],
**config,
)
analysis = tune.run(
"trainable",
resume=True,
resources_per_trial={"cpu": 2},
callbacks=[self.CheckTrialResourcesCallback(2)],
**config,
)
assert len(analysis.trials) == 27
@mock.patch.dict(os.environ, {"TUNE_MAX_PENDING_TRIALS_PG": "1"})
def testConfigUpdateInResume(self):
class FakeDataset:
def __init__(self, name):
self.name = name
config = dict(
num_samples=1,
fail_fast=True,
config={
"test": tune.grid_search(
[FakeDataset("1"), FakeDataset("2"), FakeDataset("3")]
),
"test2": tune.grid_search(
[
FakeDataset("4"),
FakeDataset("5"),
FakeDataset("6"),
FakeDataset("7"),
]
),
},
stop={"training_iteration": 2},
name="testConfigUpdateInResume",
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[
self.FailureInjectorCallback(num_trials=1),
self.CheckTrialResourcesCallback(1),
],
**config,
)
config["config"] = {
"test": tune.grid_search(
[FakeDataset("8"), FakeDataset("9"), FakeDataset("10")]
),
"test2": tune.grid_search(
[
FakeDataset("11"),
FakeDataset("12"),
FakeDataset("13"),
FakeDataset("14"),
]
),
}
analysis = tune.run(
"trainable",
resume=True,
**config,
)
assert len(analysis.trials) == 12
for t in analysis.trials:
# Make sure that test and test2 are updated.
assert t.config["test"].name in ["8", "9", "10"]
assert t.config["test2"].name in ["11", "12", "13", "14"]
def testFailResumeWithPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(
points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
)
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
name="testFailResumeWithPreset",
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(5)],
search_alg=search_alg,
**config,
)
print("---- RESTARTING RUN ----")
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=5)],
search_alg=search_alg,
**config,
)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testFailResumeAfterPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(
points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
)
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
name="testFailResumeAfterPreset",
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(15)],
search_alg=search_alg,
**config,
)
print("---- RESTARTING RUN ----")
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=15)],
search_alg=search_alg,
**config,
)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testMultiExperimentFail(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
experiments = []
for i in range(3):
experiments.append(
tune.Experiment(
run=MyTrainableClass,
name="testMultiExperimentFail",
num_samples=2,
config={
"test": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 1},
)
)
with self.assertRaises(RuntimeError):
tune.run(
experiments,
callbacks=[self.FailureInjectorCallback(10)],
fail_fast=True,
)
analysis = tune.run(
experiments,
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=10)],
fail_fast=True,
)
assert len(analysis.trials) == 18
def testWarningLargeGrid(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search(list(range(20))),
"test2": tune.grid_search(list(range(20))),
"test3": tune.grid_search(list(range(20))),
"test4": tune.grid_search(list(range(20))),
"test5": tune.grid_search(list(range(20))),
},
stop={"training_iteration": 2},
name="testWarningLargeGrid",
verbose=1,
)
with self.assertWarnsRegex(UserWarning, "exceeds the serialization threshold"):
with self.assertRaises(RuntimeError):
tune.run(
"trainable", callbacks=[self.FailureInjectorCallback(10)], **config
)
| TuneFailResumeGridTest |
python | kamyu104__LeetCode-Solutions | Python/count-connected-components-in-lcm-graph.py | {
"start": 1522,
"end": 2012
} | class ____(object):
def countComponents(self, nums, threshold):
"""
:type nums: List[int]
:type threshold: int
:rtype: int
"""
uf = UnionFind(threshold)
lookup = [-1]*threshold
for x in nums:
if x-1 >= threshold:
continue
for i in xrange(x+x, threshold+1, x):
uf.union_set(i-1, x-1)
return sum(x-1 >= threshold or uf.find_set(x-1) == x-1 for x in nums)
| Solution2 |
python | getsentry__sentry | tests/sentry/utils/test_query.py | {
"start": 1134,
"end": 6159
} | class ____(TestCase):
range_wrapper = RangeQuerySetWrapper
def test_basic(self) -> None:
total = 10
for _ in range(total):
self.create_user()
qs = User.objects.all()
assert len(list(self.range_wrapper(qs, step=2))) == total
assert len(list(self.range_wrapper(qs, limit=5))) == 5
def test_loop_and_delete(self) -> None:
total = 10
for _ in range(total):
self.create_user()
qs = User.objects.all()
for user in self.range_wrapper(qs, step=2):
user.delete()
assert User.objects.all().count() == 0
def test_empty(self) -> None:
qs = User.objects.all()
assert len(list(self.range_wrapper(qs, step=2))) == 0
def test_order_by_non_unique_fails(self) -> None:
qs = User.objects.all()
with pytest.raises(InvalidQuerySetError):
self.range_wrapper(qs, order_by="name")
# Shouldn't error if the safety check is disabled
self.range_wrapper(qs, order_by="name", override_unique_safety_check=True)
def test_order_by_unique(self) -> None:
self.create_user()
qs = User.objects.all()
self.range_wrapper(qs, order_by="username")
assert len(list(self.range_wrapper(qs, order_by="username", step=2))) == 1
def test_wrapper_over_values_list(self) -> None:
self.create_user()
qs = User.objects.all().values_list("id")
assert list(qs) == list(self.range_wrapper(qs, result_value_getter=lambda r: r[0]))
def test_retry_on_operational_error_success_after_failures(self) -> None:
"""Test that with query_timeout_retries=3, after 2 errors and 1 success it works."""
total = 5
for _ in range(total):
self.create_user()
qs = User.objects.all()
batch_attempts: list[int] = []
current_batch_count = 0
original_getitem = type(qs).__getitem__
def mock_getitem(self, slice_obj):
nonlocal current_batch_count
current_batch_count += 1
if len(batch_attempts) == 0 and current_batch_count <= 2:
raise OperationalError("canceling statement due to user request")
if len(batch_attempts) == 0 and current_batch_count == 3:
batch_attempts.append(current_batch_count)
return original_getitem(self, slice_obj)
with patch.object(type(qs), "__getitem__", mock_getitem):
results = list(
self.range_wrapper(qs, step=10, query_timeout_retries=3, retry_delay_seconds=0.01)
)
assert len(results) == total
assert batch_attempts[0] == 3
def test_retry_exhausted_raises_exception(self) -> None:
"""Test that after exhausting retries, the OperationalError is raised."""
total = 5
for _ in range(total):
self.create_user()
qs = User.objects.all()
def always_fail(self, slice_obj):
raise OperationalError("canceling statement due to user request")
with patch.object(type(qs), "__getitem__", always_fail):
with pytest.raises(OperationalError, match="canceling statement due to user request"):
list(
self.range_wrapper(
qs, step=10, query_timeout_retries=3, retry_delay_seconds=0.01
)
)
def test_retry_does_not_catch_other_exceptions(self) -> None:
"""Test that non-OperationalError exceptions are not retried."""
total = 5
for _ in range(total):
self.create_user()
qs = User.objects.all()
attempt_count = {"count": 0}
def raise_value_error(self, slice_obj):
attempt_count["count"] += 1
raise ValueError("Some other error")
with patch.object(type(qs), "__getitem__", raise_value_error):
with pytest.raises(ValueError, match="Some other error"):
list(
self.range_wrapper(
qs, step=10, query_timeout_retries=3, retry_delay_seconds=0.01
)
)
assert attempt_count["count"] == 1
def test_no_retry_when_query_timeout_retries_is_none(self) -> None:
"""Test that when query_timeout_retries is None, no retry logic is applied."""
total = 5
for _ in range(total):
self.create_user()
qs = User.objects.all()
attempt_count = {"count": 0}
def fail_once(self, slice_obj):
attempt_count["count"] += 1
raise OperationalError("canceling statement due to user request")
with patch.object(type(qs), "__getitem__", fail_once):
with pytest.raises(OperationalError, match="canceling statement due to user request"):
list(self.range_wrapper(qs, step=10, query_timeout_retries=None))
assert attempt_count["count"] == 1
@no_silo_test
| RangeQuerySetWrapperTest |
python | streamlit__streamlit | lib/tests/streamlit/web/bootstrap_test.py | {
"start": 998,
"end": 15803
} | class ____(IsolatedAsyncioTestCase):
"""Test bootstrap.py's printing functions.
(We use `IsolatedAsyncioTestCase` to ensure that an asyncio event loop
exists in tests that implicitly rely on one.)
"""
def setUp(self):
self.orig_stdout = sys.stdout
sys.stdout = StringIO()
def tearDown(self):
sys.stdout.close() # sys.stdout is a StringIO at this point.
sys.stdout = self.orig_stdout
def test_print_hello_message(self):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": True}
)
mock_get_option = testutil.build_mock_config_get_option(
{"browser.serverAddress": "the-address"}
)
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(True)
out = sys.stdout.getvalue()
assert "Welcome to Streamlit. Check out our demo in your browser." in out
assert "URL: http://the-address" in out
def test_print_urls_configured(self):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": True}
)
mock_get_option = testutil.build_mock_config_get_option(
{"browser.serverAddress": "the-address"}
)
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "You can now view your Streamlit app in your browser." in out
assert "URL: http://the-address" in out
@patch("streamlit.net_util.get_external_ip")
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_remote(self, mock_get_internal_ip, mock_get_external_ip):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{"server.headless": True}
)
mock_get_internal_ip.return_value = "internal-ip"
mock_get_external_ip.return_value = "external-ip"
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost" in out
assert "Network URL: http://internal-ip" in out
assert "External URL: http://external-ip" in out
@patch("streamlit.net_util.get_external_ip")
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_remote_no_external(
self, mock_get_internal_ip, mock_get_external_ip
):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{"server.headless": True}
)
mock_get_internal_ip.return_value = "internal-ip"
mock_get_external_ip.return_value = None
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost" in out
assert "Network URL: http://internal-ip" in out
assert "External URL: http://external-ip" not in out
@patch("streamlit.net_util.get_external_ip")
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_remote_no_internal(
self, mock_get_internal_ip, mock_get_external_ip
):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{"server.headless": True}
)
mock_get_internal_ip.return_value = None
mock_get_external_ip.return_value = "external-ip"
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost" in out
assert "Network URL: http://internal-ip" not in out
assert "External URL: http://external-ip" in out
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_local(self, mock_get_internal_ip):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{"server.headless": False}
)
mock_get_internal_ip.return_value = "internal-ip"
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost" in out
assert "Network URL: http://internal-ip" in out
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_port(self, mock_get_internal_ip):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{
"server.headless": False,
"server.port": 9988,
"global.developmentMode": False,
}
)
mock_get_internal_ip.return_value = "internal-ip"
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost:9988" in out
assert "Network URL: http://internal-ip:9988" in out
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_base(self, mock_get_internal_ip):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{
"server.headless": False,
"server.baseUrlPath": "foo",
"server.port": 8501,
"global.developmentMode": False,
}
)
mock_get_internal_ip.return_value = "internal-ip"
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost:8501/foo" in out
assert "Network URL: http://internal-ip:8501/foo" in out
@patch("streamlit.net_util.get_internal_ip")
def test_print_urls_base_no_internal(self, mock_get_internal_ip):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{
"server.headless": False,
"server.baseUrlPath": "foo",
"server.port": 8501,
"global.developmentMode": False,
}
)
mock_get_internal_ip.return_value = None
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: http://localhost:8501/foo" in out
assert "Network URL: http://internal-ip:8501/foo" not in out
@patch("streamlit.net_util.get_internal_ip", return_value="internal-ip")
def test_print_urls_ssl(self, mock_get_internal_ip):
with patch_config_options(
{
"server.headless": False,
"server.port": 9988,
"global.developmentMode": False,
"server.sslCertFile": "/tmp/aa",
"server.sslKeyFile": "/tmp/aa",
}
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Local URL: https://localhost:9988" in out
assert "Network URL: https://internal-ip:9988" in out
def test_print_socket(self):
mock_is_manually_set = testutil.build_mock_config_is_manually_set(
{"browser.serverAddress": False}
)
mock_get_option = testutil.build_mock_config_get_option(
{
"server.address": "unix://mysocket.sock",
"global.developmentMode": False,
}
)
with (
patch.object(config, "get_option", new=mock_get_option),
patch.object(config, "is_manually_set", new=mock_is_manually_set),
):
bootstrap._print_url(False)
out = sys.stdout.getvalue()
assert "Unix Socket: unix://mysocket.sock" in out
@patch("streamlit.web.bootstrap.asyncio.get_running_loop", Mock())
@patch("streamlit.web.bootstrap.secrets.load_if_toml_exists", Mock())
@patch("streamlit.web.bootstrap._maybe_print_static_folder_warning")
def test_maybe_print_static_folder_warning_called_once_on_server_start(
self, mock_maybe_print_static_folder_warning
):
"""We should trigger _maybe_print_static_folder_warning on server start."""
bootstrap._on_server_start(Mock())
mock_maybe_print_static_folder_warning.assert_called_once()
@patch("os.path.isdir", Mock(return_value=False))
@patch("click.secho")
def test_maybe_print_static_folder_warning_if_folder_doesnt_exist(self, mock_echo):
"""We should print a warning when static folder does not exist."""
with testutil.patch_config_options({"server.enableStaticServing": True}):
bootstrap._maybe_print_static_folder_warning("app_root/main_script_path")
mock_echo.assert_called_once_with(
"WARNING: Static file serving is enabled, but no static folder found "
f"at {os.path.abspath('app_root/static')}. To disable static file "
f"serving, set server.enableStaticServing to false.",
fg="yellow",
)
@patch("os.path.isdir", Mock(return_value=True))
@patch(
"streamlit.file_util.get_directory_size",
Mock(return_value=(2 * bootstrap.MAX_APP_STATIC_FOLDER_SIZE)),
)
@patch("click.secho")
def test_maybe_print_static_folder_warning_if_folder_is_too_large(self, mock_echo):
"""
We should print a warning and disable static files serving when static
folder total size is too large.
"""
with (
testutil.patch_config_options({"server.enableStaticServing": True}),
patch.object(config, "set_option") as mock_set_option,
):
bootstrap._maybe_print_static_folder_warning("app_root/main_script_path")
mock_echo.assert_called_once_with(
"WARNING: Static folder size is larger than 1GB. "
"Static file serving has been disabled.",
fg="yellow",
)
mock_set_option.assert_called_once_with("server.enableStaticServing", False)
@patch("streamlit.config.get_config_options")
def test_load_config_options(self, patched_get_config_options):
"""Test that bootstrap.load_config_options parses the keys properly and
passes down the parameters.
"""
flag_options = {
"server_port": 3005,
"server_headless": True,
"browser_serverAddress": "localhost",
"logger_level": "error",
# global_minCachedMessageSize shouldn't be set below since it's None.
"global_minCachedMessageSize": None,
}
bootstrap.load_config_options(flag_options)
patched_get_config_options.assert_called_once_with(
force_reparse=True,
options_from_flags={
"server.port": 3005,
"server.headless": True,
"browser.serverAddress": "localhost",
"logger.level": "error",
},
)
@patch("streamlit.web.bootstrap.asyncio.get_running_loop", Mock())
@patch("streamlit.web.bootstrap._maybe_print_static_folder_warning", Mock())
@patch("streamlit.web.bootstrap.secrets.load_if_toml_exists")
def test_load_secrets(self, mock_load_secrets):
"""We should load secrets.toml on startup."""
bootstrap._on_server_start(Mock())
mock_load_secrets.assert_called_once()
@patch("streamlit.web.bootstrap.asyncio.get_running_loop", Mock())
@patch("streamlit.web.bootstrap._maybe_print_static_folder_warning", Mock())
@patch("streamlit.web.bootstrap._LOGGER.error")
@patch("streamlit.web.bootstrap.secrets.load_if_toml_exists")
def test_log_secret_load_error(self, mock_load_secrets, mock_log_error):
"""If secrets throws an error on startup, we catch and log it."""
mock_exception = Exception("Secrets exploded!")
mock_load_secrets.side_effect = mock_exception
bootstrap._on_server_start(Mock())
mock_log_error.assert_called_once_with(
"Failed to load secrets.toml file",
exc_info=True,
)
@patch("streamlit.config.get_config_options")
@patch("streamlit.web.bootstrap.watch_file")
def test_install_config_watcher(
self, patched_watch_file, patched_get_config_options
):
with patch("os.path.exists", return_value=True):
bootstrap._install_config_watchers(flag_options={"server_port": 8502})
assert patched_watch_file.call_count == 2
args, _kwargs = patched_watch_file.call_args_list[0]
on_config_changed = args[1]
# Simulate a config file change being detected.
on_config_changed("/unused/nonexistent/file/path")
patched_get_config_options.assert_called_once_with(
force_reparse=True,
options_from_flags={
"server.port": 8502,
},
)
| BootstrapPrintTest |
python | conda__conda | conda/models/records.py | {
"start": 21446,
"end": 23313
} | class ____(SolvedRecord):
"""Representation of a package that is installed in a local conda environmnet.
Specialization of :class:`PackageRecord` that adds information for packages that are installed
in a local conda environment or prefix.
Note that this class does not add new fields to the :attr:`PackageRecord._pkey` so that a pure
:class:`PackageRecord` object that has the same ``_pkey`` fields as a different
:class:`PrefixRecord` object (or, indeed, a :class:`PackageCacheRecord` object) will be considered
equal and will produce the same hash.
Objects of this class are generally constructed from metadata in json files inside `$prefix/conda-meta`.
"""
#: str: The path to the originating package file, usually in the local cache.
package_tarball_full_path = StringField(required=False)
#: str: The path to the extracted package directory, usually in the local cache.
extracted_package_dir = StringField(required=False)
#: list(str): The list of all files comprising the package as relative paths from the prefix root.
files = ListField(str, default=(), required=False)
#: list(str): List with additional information about the files, e.g. checksums and link type.
paths_data = ComposableField(
PathsData, required=False, nullable=True, default_in_dump=False
)
#: :class:`Link`: Information about how the package was linked into the prefix.
link = ComposableField(Link, required=False)
# app = ComposableField(App, required=False)
# There have been requests in the past to save remote server auth
# information with the package. Open to rethinking that though.
#: str: Authentication information.
auth = StringField(required=False, nullable=True)
# @classmethod
# def load(cls, conda_meta_json_path):
# return cls()
| PrefixRecord |
python | dateutil__dateutil | setup.py | {
"start": 477,
"end": 1375
} | class ____(TestCommand):
def run(self):
sys.stderr.write("Running 'test' with setup.py is not supported. "
"Use 'pytest' or 'tox' to run the tests.\n")
sys.exit(1)
###
# Load metadata
def README():
with io.open('README.rst', encoding='utf-8') as f:
readme_lines = f.readlines()
# The .. doctest directive is not supported by PyPA
lines_out = []
for line in readme_lines:
if line.startswith('.. doctest'):
lines_out.append('.. code-block:: python3\n')
else:
lines_out.append(line)
return ''.join(lines_out)
README = README() # NOQA
setup(
use_scm_version={
'write_to': 'src/dateutil/_version.py',
},
## Needed since doctest not supported by PyPA.
long_description = README,
cmdclass={
"test": Unsupported
}
)
| Unsupported |
python | numba__numba | numba/core/base.py | {
"start": 877,
"end": 5850
} | class ____(object):
"""
An object matching an actual signature against a registry of formal
signatures and choosing the best candidate, if any.
In the current implementation:
- a "signature" is a tuple of type classes or type instances
- the "best candidate" is the most specific match
"""
def __init__(self):
# A list of (formal args tuple, value)
self.versions = []
self._cache = {}
def find(self, sig):
out = self._cache.get(sig)
if out is None:
out = self._find(sig)
self._cache[sig] = out
return out
def _find(self, sig):
candidates = self._select_compatible(sig)
if candidates:
return candidates[self._best_signature(candidates)]
else:
raise errors.NumbaNotImplementedError(f'{self}, {sig}')
def _select_compatible(self, sig):
"""
Select all compatible signatures and their implementation.
"""
out = {}
for ver_sig, impl in self.versions:
if self._match_arglist(ver_sig, sig):
out[ver_sig] = impl
return out
def _best_signature(self, candidates):
"""
Returns the best signature out of the candidates
"""
ordered, genericity = self._sort_signatures(candidates)
# check for ambiguous signatures
if len(ordered) > 1:
firstscore = genericity[ordered[0]]
same = list(takewhile(lambda x: genericity[x] == firstscore,
ordered))
if len(same) > 1:
msg = ["{n} ambiguous signatures".format(n=len(same))]
for sig in same:
msg += ["{0} => {1}".format(sig, candidates[sig])]
raise errors.NumbaTypeError('\n'.join(msg))
return ordered[0]
def _sort_signatures(self, candidates):
"""
Sort signatures in ascending level of genericity.
Returns a 2-tuple:
* ordered list of signatures
* dictionary containing genericity scores
"""
# score by genericity
genericity = defaultdict(int)
for this, other in permutations(candidates.keys(), r=2):
matched = self._match_arglist(formal_args=this, actual_args=other)
if matched:
# genericity score +1 for every another compatible signature
genericity[this] += 1
# order candidates in ascending level of genericity
ordered = sorted(candidates.keys(), key=lambda x: genericity[x])
return ordered, genericity
def _match_arglist(self, formal_args, actual_args):
"""
Returns True if the signature is "matching".
A formal signature is "matching" if the actual signature matches exactly
or if the formal signature is a compatible generic signature.
"""
# normalize VarArg
if formal_args and isinstance(formal_args[-1], types.VarArg):
ndiff = len(actual_args) - len(formal_args) + 1
formal_args = formal_args[:-1] + (formal_args[-1].dtype,) * ndiff
if len(formal_args) != len(actual_args):
return False
for formal, actual in zip(formal_args, actual_args):
if not self._match(formal, actual):
return False
return True
def _match(self, formal, actual):
if formal == actual:
# formal argument matches actual arguments
return True
elif types.Any == formal:
# formal argument is any
return True
elif isinstance(formal, type) and issubclass(formal, types.Type):
if isinstance(actual, type) and issubclass(actual, formal):
# formal arg is a type class and actual arg is a subclass
return True
elif isinstance(actual, formal):
# formal arg is a type class of which actual arg is an instance
return True
def append(self, value, sig):
"""
Add a formal signature and its associated value.
"""
assert isinstance(sig, tuple), (value, sig)
self.versions.append((sig, value))
self._cache.clear()
@utils.runonce
def _load_global_helpers():
"""
Execute once to install special symbols into the LLVM symbol table.
"""
# This is Py_None's real C name
ll.add_symbol("_Py_NoneStruct", id(None))
# Add Numba C helper functions
for c_helpers in (_helperlib.c_helpers, _dynfunc.c_helpers):
for py_name, c_address in c_helpers.items():
c_name = "numba_" + py_name
ll.add_symbol(c_name, c_address)
# Add all built-in exception classes
for obj in utils.builtins.__dict__.values():
if isinstance(obj, type) and issubclass(obj, BaseException):
ll.add_symbol("PyExc_%s" % (obj.__name__), id(obj))
| OverloadSelector |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 29916,
"end": 30506
} | class ____(ActionTool):
''' *toolbar icon*: |copy_icon|
The copy tool is an action tool, that allows copying the rendered contents of
a plot or a collection of plots to system's clipboard. This tools is browser
dependent and may not function in certain browsers, or require additional
permissions to be granted to the web page.
.. |copy_icon| image:: /_images/icons/copy.svg
:height: 24px
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| CopyTool |
python | spack__spack | lib/spack/spack/environment/environment.py | {
"start": 120039,
"end": 120176
} | class ____(SpackEnvironmentError):
"""Class for errors in applying develop information to an environment."""
| SpackEnvironmentDevelopError |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-huggingface-openvino/llama_index/embeddings/huggingface_openvino/base.py | {
"start": 697,
"end": 9646
} | class ____(BaseEmbedding):
model_id_or_path: str = Field(description="Huggingface model id or local path.")
max_length: int = Field(description="Maximum length of input.")
pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].")
normalize: bool = Field(default=True, description="Normalize embeddings or not.")
query_instruction: Optional[str] = Field(
description="Instruction to prepend to query text."
)
text_instruction: Optional[str] = Field(
description="Instruction to prepend to text."
)
cache_folder: Optional[str] = Field(
description="Cache folder for huggingface files.", default=None
)
_model: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
_device: Any = PrivateAttr()
def __init__(
self,
model_id_or_path: str = "BAAI/bge-m3",
pooling: str = "cls",
max_length: Optional[int] = None,
normalize: bool = True,
query_instruction: Optional[str] = None,
text_instruction: Optional[str] = None,
model: Optional[Any] = None,
tokenizer: Optional[Any] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
model_kwargs: Dict[str, Any] = {},
device: Optional[str] = "auto",
):
try:
from huggingface_hub import HfApi
except ImportError as e:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with: "
"`pip install -U huggingface_hub`."
) from e
def require_model_export(
model_id: str, revision: Any = None, subfolder: Any = None
) -> bool:
model_dir = Path(model_id)
if subfolder is not None:
model_dir = model_dir / subfolder
if model_dir.is_dir():
return (
not (model_dir / "openvino_model.xml").exists()
or not (model_dir / "openvino_model.bin").exists()
)
hf_api = HfApi()
try:
model_info = hf_api.model_info(model_id, revision=revision or "main")
normalized_subfolder = (
None if subfolder is None else Path(subfolder).as_posix()
)
model_files = [
file.rfilename
for file in model_info.siblings
if normalized_subfolder is None
or file.rfilename.startswith(normalized_subfolder)
]
ov_model_path = (
"openvino_model.xml"
if subfolder is None
else f"{normalized_subfolder}/openvino_model.xml"
)
return (
ov_model_path not in model_files
or ov_model_path.replace(".xml", ".bin") not in model_files
)
except Exception:
return True
if require_model_export(model_id_or_path):
# use remote model
model = model or OVModelForFeatureExtraction.from_pretrained(
model_id_or_path, export=True, device=device, **model_kwargs
)
else:
# use local model
model = model or OVModelForFeatureExtraction.from_pretrained(
model_id_or_path, device=device, **model_kwargs
)
tokenizer = tokenizer or AutoTokenizer.from_pretrained(model_id_or_path)
if max_length is None:
try:
max_length = int(model.config.max_position_embeddings)
except Exception:
raise ValueError(
"Unable to find max_length from model config. "
"Please provide max_length."
)
try:
max_length = min(max_length, int(tokenizer.model_max_length))
except Exception as exc:
print(f"An error occurred while retrieving tokenizer max length: {exc}")
if pooling not in ["cls", "mean"]:
raise ValueError(f"Pooling {pooling} not supported.")
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager or CallbackManager([]),
model_id_or_path=model_id_or_path,
max_length=max_length,
pooling=pooling,
normalize=normalize,
query_instruction=query_instruction,
text_instruction=text_instruction,
)
self._device = device
self._model = model
self._tokenizer = tokenizer
@classmethod
def class_name(cls) -> str:
return "OpenVINOEmbedding"
@staticmethod
def create_and_save_openvino_model(
model_name_or_path: str,
output_path: str,
export_kwargs: Optional[dict] = None,
) -> None:
try:
from optimum.intel.openvino import OVModelForFeatureExtraction
from transformers import AutoTokenizer
from optimum.exporters.openvino.convert import export_tokenizer
except ImportError:
raise ImportError(
"OpenVINO Embedding requires transformers and optimum to be installed.\n"
"Please install transformers with "
"`pip install transformers optimum[openvino]`."
)
export_kwargs = export_kwargs or {}
model = OVModelForFeatureExtraction.from_pretrained(
model_name_or_path, export=True, compile=False, **export_kwargs
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)
export_tokenizer(tokenizer, output_path)
print(
f"Saved OpenVINO model to {output_path}. Use it with "
f"`embed_model = OpenVINOEmbedding(model_id_or_path='{output_path}')`."
)
def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any:
"""Mean Pooling - Take attention mask into account for correct averaging."""
import torch
# First element of model_output contains all token embeddings
token_embeddings = model_output[0]
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
)
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
input_mask_expanded.sum(1), min=1e-9
)
def _cls_pooling(self, model_output: list) -> Any:
"""Use the CLS token as the pooling token."""
return model_output[0][:, 0]
def _embed(self, sentences: List[str]) -> List[List[float]]:
"""Embed sentences."""
length = self._model.request.inputs[0].get_partial_shape()[1]
if length.is_dynamic:
encoded_input = self._tokenizer(
sentences,
padding=True,
max_length=self.max_length,
truncation=True,
return_tensors="pt",
)
else:
encoded_input = self._tokenizer(
sentences,
padding="max_length",
max_length=length.get_length(),
truncation=True,
return_tensors="pt",
)
model_output = self._model(**encoded_input)
if self.pooling == "cls":
embeddings = self._cls_pooling(model_output)
else:
embeddings = self._mean_pooling(
model_output, encoded_input["attention_mask"]
)
if self.normalize:
import torch
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
return embeddings.tolist()
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
query = format_query(query, self.model_name, self.query_instruction)
return self._embed([query])[0]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding async."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Get text embedding async."""
return self._get_text_embedding(text)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
text = format_text(text, self.model_name, self.text_instruction)
return self._embed([text])[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
texts = [
format_text(text, self.model_name, self.text_instruction) for text in texts
]
return self._embed(texts)
| OpenVINOEmbedding |
python | TheAlgorithms__Python | neural_network/input_data.py | {
"start": 1047,
"end": 3784
} | class ____(typing.NamedTuple):
train: "_DataSet"
validation: "_DataSet"
test: "_DataSet"
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder(">")
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
print("Extracting", f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
msg = f"Invalid magic number {magic} in MNIST image file: {f.name}"
raise ValueError(msg)
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors.

    Each row of the result is all zeros except for a single 1.0 in the
    column given by the corresponding label.
    """
    count = labels_dense.shape[0]
    one_hot = np.zeros((count, num_classes))
    # Flattened index of the "hot" cell of every row.
    flat_targets = np.arange(count) * num_classes + labels_dense.ravel()
    one_hot.flat[flat_targets] = 1
    return one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index].

    Args:
      f: A file object that can be passed into a gzip reader.
      one_hot: Does one hot encoding for the result.
      num_classes: Number of classes for the one hot encoding.

    Returns:
      labels: a 1D uint8 numpy array.

    Raises:
      ValueError: If the bytestream doesn't start with 2049.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        # 2049 is the magic number of the MNIST/IDX label file format.
        magic = _read32(bytestream)
        if magic != 2049:
            msg = f"Invalid magic number {magic} in MNIST label file: {f.name}"
            raise ValueError(msg)
        num_items = _read32(bytestream)
        # One byte per label.
        buf = bytestream.read(num_items)
        labels = np.frombuffer(buf, dtype=np.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
| _Datasets |
python | facebook__pyre-check | client/configuration/exceptions.py | {
"start": 480,
"end": 610
} | class ____(InvalidConfiguration):
    def __init__(self, message: str) -> None:
        """Forward *message* unchanged to the ``InvalidConfiguration`` base."""
        super().__init__(message)
| InvalidPythonVersion |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 2781,
"end": 2981
} | class ____(JoseError):
error = "insecure_claim"
def __init__(self, claim):
description = f"Insecure claim '{claim}'"
super().__init__(description=description)
| InsecureClaimError |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_Google.py | {
"start": 6226,
"end": 6558
} | class ____:
def __init__(self, x, y): # [missing-param-doc, missing-type-doc]
"""test_constr_params_in_init_google
Example of a class with missing constructor parameter documentation
(Google style)
Args:
y: bla
missing constructor parameter documentation
"""
| ClassFoo |
python | Textualize__textual | src/textual/command.py | {
"start": 10183,
"end": 12010
} | class ____(Provider):
"""A simple provider which the caller can pass commands to."""
def __init__(
self,
screen: Screen[Any],
commands: list[CommandListItem],
) -> None:
# Convert all commands to SimpleCommand instances
super().__init__(screen, None)
self._commands: list[SimpleCommand] = []
for command in commands:
if isinstance(command, SimpleCommand):
self._commands.append(command)
elif len(command) == 2:
self._commands.append(SimpleCommand(*command, None))
elif len(command) == 3:
self._commands.append(SimpleCommand(*command))
else:
raise ValueError(f"Invalid command: {command}")
    def __call__(
        self, screen: Screen[Any], match_style: Style | None = None
    ) -> SimpleProvider:
        # Record the highlight style and return the provider itself, so an
        # instance can be used where a provider factory is expected.
        # NOTE(review): *screen* is unused here — presumably kept for
        # signature compatibility with the factory protocol; confirm.
        self.__match_style = match_style
        return self
    @property
    def match_style(self) -> Style | None:
        """The match-highlight style most recently recorded by ``__call__``."""
        # NOTE(review): only assigned in __call__ — reading it before the
        # provider has been called raises AttributeError; confirm callers.
        return self.__match_style
    async def search(self, query: str) -> Hits:
        """Yield a ``Hit`` for each command whose name matches *query*."""
        matcher = self.matcher(query)
        for name, callback, help_text in self._commands:
            # Only positive scores count as matches.
            if (match := matcher.match(name)) > 0:
                yield Hit(
                    match,
                    matcher.highlight(name),
                    callback,
                    help=help_text,
                )
    async def discover(self) -> Hits:
        """Handle a request for the discovery commands for this provider.

        Yields:
            Commands that can be discovered.
        """
        # Unlike search(), every registered command is offered unfiltered.
        for name, callback, help_text in self._commands:
            yield DiscoveryHit(
                name,
                callback,
                help=help_text,
            )
@rich.repr.auto
@total_ordering
| SimpleProvider |
python | pytorch__pytorch | torch/nn/modules/pooling.py | {
"start": 52019,
"end": 53089
} | class ____(_AdaptiveMaxPoolNd):
r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
The output size is :math:`L_{out}`, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size :math:`L_{out}`.
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool1d. Default: ``False``
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
:math:`L_{out}=\text{output\_size}`.
Examples:
>>> # target output size of 5
>>> m = nn.AdaptiveMaxPool1d(5)
>>> input = torch.randn(1, 64, 8)
>>> output = m(input)
"""
output_size: _size_1_t
    def forward(self, input: Tensor):
        """Runs the forward pass.

        Returns the adaptively max-pooled tensor; when ``return_indices``
        is set, an ``(output, indices)`` tuple instead.
        """
        return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
| AdaptiveMaxPool1d |
python | walkccc__LeetCode | solutions/2912. Number of Ways to Reach Destination in the Grid/2912-2.py | {
"start": 0,
"end": 1233
} | class ____:
def numberOfWays(
self,
n: int,
m: int,
k: int,
source: list[int],
dest: list[int],
) -> int:
MOD = 1_000_000_007
# the number of ways of `source` to `dest` using steps so far
ans = int(source == dest)
# the number of ways of `source` to dest's row using steps so far
row = int(source[0] == dest[0] and source[1] != dest[1])
# the number of ways of `source` to dest's col using steps so far
col = int(source[0] != dest[0] and source[1] == dest[1])
# the number of ways of `source` to others using steps so far
others = int(source[0] != dest[0] and source[1] != dest[1])
for _ in range(k):
nextAns = (row + col) % MOD
nextRow = (ans * (m - 1) + # -self
row * (m - 2) + # -self, -center
others) % MOD
nextCol = (ans * (n - 1) + # -self
col * (n - 2) + # -self, -center
others) % MOD
nextOthers = (row * (n - 1) + # -self
col * (m - 1) + # -self
others * (m + n - 1 - 3)) % MOD # -self, -row, -col
ans = nextAns
row = nextRow
col = nextCol
others = nextOthers
return ans
| Solution |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/handlers/github_enterprise_handler.py | {
"start": 415,
"end": 586
} | class ____(TicketingActionHandler):
group = ActionHandler.Group.TICKET_CREATION
provider_slug = IntegrationProviderSlug.GITHUB_ENTERPRISE
| GithubEnterpriseActionHandler |
python | xlwings__xlwings | xlwings/main.py | {
"start": 112968,
"end": 119799
} | class ____:
"""
The chart object is a member of the :meth:`charts <xlwings.main.Charts>` collection:
>>> import xlwings as xw
>>> sht = xw.books['Book1'].sheets[0]
>>> sht.charts[0] # or sht.charts['ChartName']
<Chart 'Chart 1' in <Sheet [Book1]Sheet1>>
"""
    def __init__(self, name_or_index=None, impl=None):
        # Three construction modes: wrap an existing implementation object,
        # look up an existing chart on the active sheet by name or index,
        # or (no arguments) add a brand-new chart to the active sheet.
        if impl is not None:
            self.impl = impl
        elif name_or_index is not None:
            self.impl = sheets.active.charts(name_or_index).impl
        else:
            self.impl = sheets.active.charts.add().impl
    @property
    def api(self):
        """
        Returns the native object (``pywin32`` or ``appscript`` obj)
        of the engine being used.

        .. versionadded:: 0.9.0
        """
        # Escape hatch for engine functionality not wrapped by xlwings.
        return self.impl.api
    @property
    def name(self):
        """
        Returns or sets the name of the chart.
        """
        return self.impl.name
    @name.setter
    def name(self, value):
        # Renaming is delegated to the engine-specific implementation.
        self.impl.name = value
    @property
    def parent(self):
        """
        Returns the parent of the chart.

        .. versionadded:: 0.9.0
        """
        # Chart sheet (parent is Book) is not supported, so the parent is
        # always wrapped as a Sheet.
        return Sheet(impl=self.impl.parent)
@property
def chart_type(self):
"""
Returns and sets the chart type of the chart.
The following chart types are available:
``3d_area``,
``3d_area_stacked``,
``3d_area_stacked_100``,
``3d_bar_clustered``,
``3d_bar_stacked``,
``3d_bar_stacked_100``,
``3d_column``,
``3d_column_clustered``,
``3d_column_stacked``,
``3d_column_stacked_100``,
``3d_line``,
``3d_pie``,
``3d_pie_exploded``,
``area``,
``area_stacked``,
``area_stacked_100``,
``bar_clustered``,
``bar_of_pie``,
``bar_stacked``,
``bar_stacked_100``,
``bubble``,
``bubble_3d_effect``,
``column_clustered``,
``column_stacked``,
``column_stacked_100``,
``combination``,
``cone_bar_clustered``,
``cone_bar_stacked``,
``cone_bar_stacked_100``,
``cone_col``,
``cone_col_clustered``,
``cone_col_stacked``,
``cone_col_stacked_100``,
``cylinder_bar_clustered``,
``cylinder_bar_stacked``,
``cylinder_bar_stacked_100``,
``cylinder_col``,
``cylinder_col_clustered``,
``cylinder_col_stacked``,
``cylinder_col_stacked_100``,
``doughnut``,
``doughnut_exploded``,
``line``,
``line_markers``,
``line_markers_stacked``,
``line_markers_stacked_100``,
``line_stacked``,
``line_stacked_100``,
``pie``,
``pie_exploded``,
``pie_of_pie``,
``pyramid_bar_clustered``,
``pyramid_bar_stacked``,
``pyramid_bar_stacked_100``,
``pyramid_col``,
``pyramid_col_clustered``,
``pyramid_col_stacked``,
``pyramid_col_stacked_100``,
``radar``,
``radar_filled``,
``radar_markers``,
``stock_hlc``,
``stock_ohlc``,
``stock_vhlc``,
``stock_vohlc``,
``surface``,
``surface_top_view``,
``surface_top_view_wireframe``,
``surface_wireframe``,
``xy_scatter``,
``xy_scatter_lines``,
``xy_scatter_lines_no_markers``,
``xy_scatter_smooth``,
``xy_scatter_smooth_no_markers``
.. versionadded:: 0.1.1
"""
return self.impl.chart_type
    @chart_type.setter
    def chart_type(self, value):
        # Accepts any of the string constants enumerated in the getter's
        # docstring; translation is handled by the engine implementation.
        self.impl.chart_type = value
    def set_source_data(self, source):
        """
        Sets the source data range for the chart.

        Arguments
        ---------
        source : Range
            Range object, e.g. ``xw.books['Book1'].sheets[0].range('A1')``
        """
        # Unwrap to the engine-level range object before delegating.
        self.impl.set_source_data(source.impl)
    # --- Geometry ---------------------------------------------------------
    # All four properties below are plain pass-throughs to the engine
    # implementation; values are measured in points.
    @property
    def left(self):
        """
        Returns or sets the number of points that represent the horizontal position
        of the chart.
        """
        return self.impl.left
    @left.setter
    def left(self, value):
        self.impl.left = value
    @property
    def top(self):
        """
        Returns or sets the number of points that represent the vertical position
        of the chart.
        """
        return self.impl.top
    @top.setter
    def top(self, value):
        self.impl.top = value
    @property
    def width(self):
        """
        Returns or sets the number of points that represent the width of the chart.
        """
        return self.impl.width
    @width.setter
    def width(self, value):
        self.impl.width = value
    @property
    def height(self):
        """
        Returns or sets the number of points that represent the height of the chart.
        """
        return self.impl.height
    @height.setter
    def height(self, value):
        self.impl.height = value
    def delete(self):
        """
        Deletes the chart.
        """
        # The wrapper object itself stays alive; only the chart is removed.
        self.impl.delete()
    def to_png(self, path=None):
        """
        Exports the chart as PNG picture.

        Parameters
        ----------
        path : str or path-like, default None
            Path where you want to store the picture. Defaults to the name of the chart
            in the same directory as the Excel file if the Excel file is stored and to
            the current working directory otherwise.

        .. versionadded:: 0.24.8
        """
        path = utils.fspath(path)
        if path is None:
            # Default: "<workbook dir>/<chart name>.png" when the book has
            # been saved, otherwise the current working directory.
            directory, _ = os.path.split(self.parent.book.fullname)
            if directory:
                path = os.path.join(directory, self.name + ".png")
            else:
                path = str(Path.cwd() / self.name) + ".png"
        self.impl.to_png(path)
    def to_pdf(self, path=None, show=None, quality="standard"):
        """
        Exports the chart as PDF.

        Parameters
        ----------
        path : str or path-like, default None
            Path where you want to store the pdf. Defaults to the name of the chart in
            the same directory as the Excel file if the Excel file is stored and to the
            current working directory otherwise.
        show : bool, default False
            Once created, open the PDF file with the default application.
        quality : str, default ``'standard'``
            Quality of the PDF file. Can either be ``'standard'`` or ``'minimum'``.

        .. versionadded:: 0.26.2
        """
        # Defaulting of `path` and opening via `show` are handled centrally.
        return utils.to_pdf(self, path=path, show=show, quality=quality)
def __repr__(self):
return "<Chart '{0}' in {1}>".format(self.name, self.parent)
| Chart |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/shortcuts/prompt.py | {
"start": 7014,
"end": 60747
} | class ____(Generic[_T]):
"""
PromptSession for a prompt application, which can be used as a GNU Readline
replacement.
This is a wrapper around a lot of ``prompt_toolkit`` functionality and can
be a replacement for `raw_input`.
All parameters that expect "formatted text" can take either just plain text
(a unicode object), a list of ``(style_str, text)`` tuples or an HTML object.
Example usage::
s = PromptSession(message='>')
text = s.prompt()
:param message: Plain text or formatted text to be shown before the prompt.
This can also be a callable that returns formatted text.
:param multiline: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True, prefer a layout that is more adapted for multiline input.
Text after newlines is automatically indented, and search/arg input is
shown below the input, instead of replacing the prompt.
:param wrap_lines: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True (the default), automatically wrap long lines instead of
scrolling horizontally.
:param is_password: Show asterisks instead of the actual typed characters.
:param editing_mode: ``EditingMode.VI`` or ``EditingMode.EMACS``.
:param vi_mode: `bool`, if True, Identical to ``editing_mode=EditingMode.VI``.
:param complete_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable autocompletion while
typing.
:param validate_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable input validation while
typing.
:param enable_history_search: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable up-arrow parting
string matching.
:param search_ignore_case:
:class:`~prompt_toolkit.filters.Filter`. Search case insensitive.
:param lexer: :class:`~prompt_toolkit.lexers.Lexer` to be used for the
syntax highlighting.
:param validator: :class:`~prompt_toolkit.validation.Validator` instance
for input validation.
:param completer: :class:`~prompt_toolkit.completion.Completer` instance
for input completion.
:param complete_in_thread: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Run the completer code in a
background thread in order to avoid blocking the user interface.
For ``CompleteStyle.READLINE_LIKE``, this setting has no effect. There
we always run the completions in the main thread.
:param reserve_space_for_menu: Space to be reserved for displaying the menu.
(0 means that no space needs to be reserved.)
:param auto_suggest: :class:`~prompt_toolkit.auto_suggest.AutoSuggest`
instance for input suggestions.
:param style: :class:`.Style` instance for the color scheme.
:param include_default_pygments_style: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Tell whether the default
styling for Pygments lexers has to be included. By default, this is
true, but it is recommended to be disabled if another Pygments style is
passed as the `style` argument, otherwise, two Pygments styles will be
merged.
:param style_transformation:
:class:`~prompt_toolkit.style.StyleTransformation` instance.
:param swap_light_and_dark_colors: `bool` or
:class:`~prompt_toolkit.filters.Filter`. When enabled, apply
:class:`~prompt_toolkit.style.SwapLightAndDarkStyleTransformation`.
This is useful for switching between dark and light terminal
backgrounds.
:param enable_system_prompt: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing Meta+'!' will show
a system prompt.
:param enable_suspend: `bool` or :class:`~prompt_toolkit.filters.Filter`.
Enable Control-Z style suspension.
:param enable_open_in_editor: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing 'v' in Vi mode or
C-X C-E in emacs mode will open an external editor.
:param history: :class:`~prompt_toolkit.history.History` instance.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` instance.
(e.g. :class:`~prompt_toolkit.clipboard.InMemoryClipboard`)
:param rprompt: Text or formatted text to be displayed on the right side.
This can also be a callable that returns (formatted) text.
:param bottom_toolbar: Formatted text or callable that returns formatted
text to be displayed at the bottom of the screen.
:param prompt_continuation: Text that needs to be displayed for a multiline
prompt continuation. This can either be formatted text or a callable
that takes a `prompt_width`, `line_number` and `wrap_count` as input
and returns formatted text. When this is `None` (the default), then
`prompt_width` spaces will be used.
:param complete_style: ``CompleteStyle.COLUMN``,
``CompleteStyle.MULTI_COLUMN`` or ``CompleteStyle.READLINE_LIKE``.
:param mouse_support: `bool` or :class:`~prompt_toolkit.filters.Filter`
to enable mouse support.
:param placeholder: Text to be displayed when no input has been given
yet. Unlike the `default` parameter, this won't be returned as part of
the output ever. This can be formatted text or a callable that returns
formatted text.
:param show_frame: `bool` or
:class:`~prompt_toolkit.filters.Filter`. When True, surround the input
with a frame.
:param refresh_interval: (number; in seconds) When given, refresh the UI
every so many seconds.
:param input: `Input` object. (Note that the preferred way to change the
input/output is by creating an `AppSession`.)
:param output: `Output` object.
:param interrupt_exception: The exception type that will be raised when
there is a keyboard interrupt (control-c keypress).
:param eof_exception: The exception type that will be raised when there is
an end-of-file/exit event (control-d keypress).
"""
_fields = (
"message",
"lexer",
"completer",
"complete_in_thread",
"is_password",
"editing_mode",
"key_bindings",
"is_password",
"bottom_toolbar",
"style",
"style_transformation",
"swap_light_and_dark_colors",
"color_depth",
"cursor",
"include_default_pygments_style",
"rprompt",
"multiline",
"prompt_continuation",
"wrap_lines",
"enable_history_search",
"search_ignore_case",
"complete_while_typing",
"validate_while_typing",
"complete_style",
"mouse_support",
"auto_suggest",
"clipboard",
"validator",
"refresh_interval",
"input_processors",
"placeholder",
"enable_system_prompt",
"enable_suspend",
"enable_open_in_editor",
"reserve_space_for_menu",
"tempfile_suffix",
"tempfile",
"show_frame",
)
def __init__(
self,
message: AnyFormattedText = "",
*,
multiline: FilterOrBool = False,
wrap_lines: FilterOrBool = True,
is_password: FilterOrBool = False,
vi_mode: bool = False,
editing_mode: EditingMode = EditingMode.EMACS,
complete_while_typing: FilterOrBool = True,
validate_while_typing: FilterOrBool = True,
enable_history_search: FilterOrBool = False,
search_ignore_case: FilterOrBool = False,
lexer: Lexer | None = None,
enable_system_prompt: FilterOrBool = False,
enable_suspend: FilterOrBool = False,
enable_open_in_editor: FilterOrBool = False,
validator: Validator | None = None,
completer: Completer | None = None,
complete_in_thread: bool = False,
reserve_space_for_menu: int = 8,
complete_style: CompleteStyle = CompleteStyle.COLUMN,
auto_suggest: AutoSuggest | None = None,
style: BaseStyle | None = None,
style_transformation: StyleTransformation | None = None,
swap_light_and_dark_colors: FilterOrBool = False,
color_depth: ColorDepth | None = None,
cursor: AnyCursorShapeConfig = None,
include_default_pygments_style: FilterOrBool = True,
history: History | None = None,
clipboard: Clipboard | None = None,
prompt_continuation: PromptContinuationText | None = None,
rprompt: AnyFormattedText = None,
bottom_toolbar: AnyFormattedText = None,
mouse_support: FilterOrBool = False,
input_processors: list[Processor] | None = None,
placeholder: AnyFormattedText | None = None,
key_bindings: KeyBindingsBase | None = None,
erase_when_done: bool = False,
tempfile_suffix: str | Callable[[], str] | None = ".txt",
tempfile: str | Callable[[], str] | None = None,
refresh_interval: float = 0,
show_frame: FilterOrBool = False,
input: Input | None = None,
output: Output | None = None,
interrupt_exception: type[BaseException] = KeyboardInterrupt,
eof_exception: type[BaseException] = EOFError,
) -> None:
history = history or InMemoryHistory()
clipboard = clipboard or InMemoryClipboard()
# Ensure backwards-compatibility, when `vi_mode` is passed.
if vi_mode:
editing_mode = EditingMode.VI
# Store all settings in this class.
self._input = input
self._output = output
# Store attributes.
# (All except 'editing_mode'.)
self.message = message
self.lexer = lexer
self.completer = completer
self.complete_in_thread = complete_in_thread
self.is_password = is_password
self.key_bindings = key_bindings
self.bottom_toolbar = bottom_toolbar
self.style = style
self.style_transformation = style_transformation
self.swap_light_and_dark_colors = swap_light_and_dark_colors
self.color_depth = color_depth
self.cursor = cursor
self.include_default_pygments_style = include_default_pygments_style
self.rprompt = rprompt
self.multiline = multiline
self.prompt_continuation = prompt_continuation
self.wrap_lines = wrap_lines
self.enable_history_search = enable_history_search
self.search_ignore_case = search_ignore_case
self.complete_while_typing = complete_while_typing
self.validate_while_typing = validate_while_typing
self.complete_style = complete_style
self.mouse_support = mouse_support
self.auto_suggest = auto_suggest
self.clipboard = clipboard
self.validator = validator
self.refresh_interval = refresh_interval
self.input_processors = input_processors
self.placeholder = placeholder
self.enable_system_prompt = enable_system_prompt
self.enable_suspend = enable_suspend
self.enable_open_in_editor = enable_open_in_editor
self.reserve_space_for_menu = reserve_space_for_menu
self.tempfile_suffix = tempfile_suffix
self.tempfile = tempfile
self.show_frame = show_frame
self.interrupt_exception = interrupt_exception
self.eof_exception = eof_exception
# Create buffers, layout and Application.
self.history = history
self.default_buffer = self._create_default_buffer()
self.search_buffer = self._create_search_buffer()
self.layout = self._create_layout()
self.app = self._create_application(editing_mode, erase_when_done)
    def _dyncond(self, attr_name: str) -> Condition:
        """
        Dynamically take this setting from this 'PromptSession' class.
        `attr_name` represents an attribute name of this class. Its value
        can either be a boolean or a `Filter`.

        This returns something that can be used as a `Filter`.
        """
        @Condition
        def dynamic() -> bool:
            # Re-read the attribute on every evaluation, so callers may
            # mutate the session setting after construction.
            value = cast(FilterOrBool, getattr(self, attr_name))
            return to_filter(value)()
        return dynamic
    def _create_default_buffer(self) -> Buffer:
        """
        Create and return the default input buffer.
        """
        dyncond = self._dyncond
        # Accept-handler: called when validation of the buffer succeeds.
        def accept(buff: Buffer) -> bool:
            """Accept the content of the default buffer. This is called when
            the validation succeeds."""
            cast(Application[str], get_app()).exit(
                result=buff.document.text, style="class:accepted"
            )
            return True  # Keep text, we call 'reset' later on.
        # The Dynamic* wrappers re-read the session attributes on every use,
        # so settings may be changed between prompt() calls.
        return Buffer(
            name=DEFAULT_BUFFER,
            # Make sure that complete_while_typing is disabled when
            # enable_history_search is enabled. (First convert to Filter,
            # to avoid doing bitwise operations on bool objects.)
            complete_while_typing=Condition(
                lambda: is_true(self.complete_while_typing)
                and not is_true(self.enable_history_search)
                and not self.complete_style == CompleteStyle.READLINE_LIKE
            ),
            validate_while_typing=dyncond("validate_while_typing"),
            enable_history_search=dyncond("enable_history_search"),
            validator=DynamicValidator(lambda: self.validator),
            completer=DynamicCompleter(
                lambda: ThreadedCompleter(self.completer)
                if self.complete_in_thread and self.completer
                else self.completer
            ),
            history=self.history,
            auto_suggest=DynamicAutoSuggest(lambda: self.auto_suggest),
            accept_handler=accept,
            tempfile_suffix=lambda: to_str(self.tempfile_suffix or ""),
            tempfile=lambda: to_str(self.tempfile or ""),
        )
    def _create_search_buffer(self) -> Buffer:
        """Create the buffer that holds the incremental-search text."""
        return Buffer(name=SEARCH_BUFFER)
def _create_layout(self) -> Layout:
"""
Create `Layout` for this prompt.
"""
dyncond = self._dyncond
# Create functions that will dynamically split the prompt. (If we have
# a multiline prompt.)
(
has_before_fragments,
get_prompt_text_1,
get_prompt_text_2,
) = _split_multiline_prompt(self._get_prompt)
default_buffer = self.default_buffer
search_buffer = self.search_buffer
# Create processors list.
@Condition
def display_placeholder() -> bool:
return self.placeholder is not None and self.default_buffer.text == ""
all_input_processors = [
HighlightIncrementalSearchProcessor(),
HighlightSelectionProcessor(),
ConditionalProcessor(
AppendAutoSuggestion(), has_focus(default_buffer) & ~is_done
),
ConditionalProcessor(PasswordProcessor(), dyncond("is_password")),
DisplayMultipleCursors(),
# Users can insert processors here.
DynamicProcessor(lambda: merge_processors(self.input_processors or [])),
ConditionalProcessor(
AfterInput(lambda: self.placeholder),
filter=display_placeholder,
),
]
# Create bottom toolbars.
bottom_toolbar = ConditionalContainer(
Window(
FormattedTextControl(
lambda: self.bottom_toolbar, style="class:bottom-toolbar.text"
),
style="class:bottom-toolbar",
dont_extend_height=True,
height=Dimension(min=1),
),
filter=Condition(lambda: self.bottom_toolbar is not None)
& ~is_done
& renderer_height_is_known,
)
search_toolbar = SearchToolbar(
search_buffer, ignore_case=dyncond("search_ignore_case")
)
search_buffer_control = SearchBufferControl(
buffer=search_buffer,
input_processors=[ReverseSearchProcessor()],
ignore_case=dyncond("search_ignore_case"),
)
system_toolbar = SystemToolbar(
enable_global_bindings=dyncond("enable_system_prompt")
)
def get_search_buffer_control() -> SearchBufferControl:
"Return the UIControl to be focused when searching start."
if is_true(self.multiline):
return search_toolbar.control
else:
return search_buffer_control
default_buffer_control = BufferControl(
buffer=default_buffer,
search_buffer_control=get_search_buffer_control,
input_processors=all_input_processors,
include_default_input_processors=False,
lexer=DynamicLexer(lambda: self.lexer),
preview_search=True,
)
default_buffer_window = Window(
default_buffer_control,
height=self._get_default_buffer_control_height,
get_line_prefix=partial(
self._get_line_prefix, get_prompt_text_2=get_prompt_text_2
),
wrap_lines=dyncond("wrap_lines"),
)
@Condition
def multi_column_complete_style() -> bool:
return self.complete_style == CompleteStyle.MULTI_COLUMN
# Build the layout.
# The main input, with completion menus floating on top of it.
main_input_container = FloatContainer(
HSplit(
[
ConditionalContainer(
Window(
FormattedTextControl(get_prompt_text_1),
dont_extend_height=True,
),
Condition(has_before_fragments),
),
ConditionalContainer(
default_buffer_window,
Condition(
lambda: get_app().layout.current_control
!= search_buffer_control
),
),
ConditionalContainer(
Window(search_buffer_control),
Condition(
lambda: get_app().layout.current_control
== search_buffer_control
),
),
]
),
[
# Completion menus.
# NOTE: Especially the multi-column menu needs to be
# transparent, because the shape is not always
# rectangular due to the meta-text below the menu.
Float(
xcursor=True,
ycursor=True,
transparent=True,
content=CompletionsMenu(
max_height=16,
scroll_offset=1,
extra_filter=has_focus(default_buffer)
& ~multi_column_complete_style,
),
),
Float(
xcursor=True,
ycursor=True,
transparent=True,
content=MultiColumnCompletionsMenu(
show_meta=True,
extra_filter=has_focus(default_buffer)
& multi_column_complete_style,
),
),
# The right prompt.
Float(
right=0,
top=0,
hide_when_covering_content=True,
content=_RPrompt(lambda: self.rprompt),
),
],
)
layout = HSplit(
[
# Wrap the main input in a frame, if requested.
ConditionalContainer(
Frame(main_input_container),
filter=dyncond("show_frame"),
alternative_content=main_input_container,
),
ConditionalContainer(ValidationToolbar(), filter=~is_done),
ConditionalContainer(
system_toolbar, dyncond("enable_system_prompt") & ~is_done
),
# In multiline mode, we use two toolbars for 'arg' and 'search'.
ConditionalContainer(
Window(FormattedTextControl(self._get_arg_text), height=1),
dyncond("multiline") & has_arg,
),
ConditionalContainer(search_toolbar, dyncond("multiline") & ~is_done),
bottom_toolbar,
]
)
return Layout(layout, default_buffer_window)
def _create_application(
self, editing_mode: EditingMode, erase_when_done: bool
) -> Application[_T]:
"""
Create the `Application` object.
"""
dyncond = self._dyncond
# Default key bindings.
auto_suggest_bindings = load_auto_suggest_bindings()
open_in_editor_bindings = load_open_in_editor_bindings()
prompt_bindings = self._create_prompt_bindings()
# Create application
application: Application[_T] = Application(
layout=self.layout,
style=DynamicStyle(lambda: self.style),
style_transformation=merge_style_transformations(
[
DynamicStyleTransformation(lambda: self.style_transformation),
ConditionalStyleTransformation(
SwapLightAndDarkStyleTransformation(),
dyncond("swap_light_and_dark_colors"),
),
]
),
include_default_pygments_style=dyncond("include_default_pygments_style"),
clipboard=DynamicClipboard(lambda: self.clipboard),
key_bindings=merge_key_bindings(
[
merge_key_bindings(
[
auto_suggest_bindings,
ConditionalKeyBindings(
open_in_editor_bindings,
dyncond("enable_open_in_editor")
& has_focus(DEFAULT_BUFFER),
),
prompt_bindings,
]
),
DynamicKeyBindings(lambda: self.key_bindings),
]
),
mouse_support=dyncond("mouse_support"),
editing_mode=editing_mode,
erase_when_done=erase_when_done,
reverse_vi_search_direction=True,
color_depth=lambda: self.color_depth,
cursor=DynamicCursorShapeConfig(lambda: self.cursor),
refresh_interval=self.refresh_interval,
input=self._input,
output=self._output,
)
# During render time, make sure that we focus the right search control
# (if we are searching). - This could be useful if people make the
# 'multiline' property dynamic.
"""
def on_render(app):
multiline = is_true(self.multiline)
current_control = app.layout.current_control
if multiline:
if current_control == search_buffer_control:
app.layout.current_control = search_toolbar.control
app.invalidate()
else:
if current_control == search_toolbar.control:
app.layout.current_control = search_buffer_control
app.invalidate()
app.on_render += on_render
"""
return application
    def _create_prompt_bindings(self) -> KeyBindings:
        """
        Create the KeyBindings for a prompt application.
        """
        kb = KeyBindings()
        handle = kb.add
        default_focused = has_focus(DEFAULT_BUFFER)
        @Condition
        def do_accept() -> bool:
            # Enter accepts the input only in single-line mode.
            return not is_true(self.multiline) and self.app.layout.has_focus(
                DEFAULT_BUFFER
            )
        @handle("enter", filter=do_accept & default_focused)
        def _accept_input(event: E) -> None:
            "Accept input when enter has been pressed."
            self.default_buffer.validate_and_handle()
        @Condition
        def readline_complete_style() -> bool:
            return self.complete_style == CompleteStyle.READLINE_LIKE
        @handle("tab", filter=readline_complete_style & default_focused)
        def _complete_like_readline(event: E) -> None:
            "Display completions (like Readline)."
            display_completions_like_readline(event)
        # The exception types raised here are configurable through the
        # `interrupt_exception` / `eof_exception` session settings.
        @handle("c-c", filter=default_focused)
        @handle("<sigint>")
        def _keyboard_interrupt(event: E) -> None:
            "Abort when Control-C has been pressed."
            event.app.exit(exception=self.interrupt_exception(), style="class:aborting")
        @Condition
        def ctrl_d_condition() -> bool:
            """Ctrl-D binding is only active when the default buffer is selected
            and empty."""
            app = get_app()
            return (
                app.current_buffer.name == DEFAULT_BUFFER
                and not app.current_buffer.text
            )
        @handle("c-d", filter=ctrl_d_condition & default_focused)
        def _eof(event: E) -> None:
            "Exit when Control-D has been pressed."
            event.app.exit(exception=self.eof_exception(), style="class:exiting")
        suspend_supported = Condition(suspend_to_background_supported)
        @Condition
        def enable_suspend() -> bool:
            return to_filter(self.enable_suspend)()
        @handle("c-z", filter=suspend_supported & enable_suspend)
        def _suspend(event: E) -> None:
            """
            Suspend process to background.
            """
            event.app.suspend_to_background()
        return kb
    def prompt(
        self,
        # When any of these arguments are passed, this value is overwritten
        # in this PromptSession.
        message: AnyFormattedText | None = None,
        # `message` should go first, because people call it as
        # positional argument.
        *,
        editing_mode: EditingMode | None = None,
        refresh_interval: float | None = None,
        vi_mode: bool | None = None,
        lexer: Lexer | None = None,
        completer: Completer | None = None,
        complete_in_thread: bool | None = None,
        is_password: bool | None = None,
        key_bindings: KeyBindingsBase | None = None,
        bottom_toolbar: AnyFormattedText | None = None,
        style: BaseStyle | None = None,
        color_depth: ColorDepth | None = None,
        cursor: AnyCursorShapeConfig | None = None,
        include_default_pygments_style: FilterOrBool | None = None,
        style_transformation: StyleTransformation | None = None,
        swap_light_and_dark_colors: FilterOrBool | None = None,
        rprompt: AnyFormattedText | None = None,
        multiline: FilterOrBool | None = None,
        prompt_continuation: PromptContinuationText | None = None,
        wrap_lines: FilterOrBool | None = None,
        enable_history_search: FilterOrBool | None = None,
        search_ignore_case: FilterOrBool | None = None,
        complete_while_typing: FilterOrBool | None = None,
        validate_while_typing: FilterOrBool | None = None,
        complete_style: CompleteStyle | None = None,
        auto_suggest: AutoSuggest | None = None,
        validator: Validator | None = None,
        clipboard: Clipboard | None = None,
        mouse_support: FilterOrBool | None = None,
        input_processors: list[Processor] | None = None,
        placeholder: AnyFormattedText | None = None,
        reserve_space_for_menu: int | None = None,
        enable_system_prompt: FilterOrBool | None = None,
        enable_suspend: FilterOrBool | None = None,
        enable_open_in_editor: FilterOrBool | None = None,
        tempfile_suffix: str | Callable[[], str] | None = None,
        tempfile: str | Callable[[], str] | None = None,
        show_frame: FilterOrBool | None = None,
        # Following arguments are specific to the current `prompt()` call.
        default: str | Document = "",
        accept_default: bool = False,
        pre_run: Callable[[], None] | None = None,
        set_exception_handler: bool = True,
        handle_sigint: bool = True,
        in_thread: bool = False,
        inputhook: InputHook | None = None,
    ) -> _T:
        """
        Display the prompt.
        The first set of arguments is a subset of the :class:`~.PromptSession`
        class itself. For these, passing in ``None`` will keep the current
        values that are active in the session. Passing in a value will set the
        attribute for the session, which means that it applies to the current,
        but also to the next prompts.
        Note that in order to erase a ``Completer``, ``Validator`` or
        ``AutoSuggest``, you can't use ``None``. Instead pass in a
        ``DummyCompleter``, ``DummyValidator`` or ``DummyAutoSuggest`` instance
        respectively. For a ``Lexer`` you can pass in an empty ``SimpleLexer``.
        Additional arguments, specific for this prompt:
        :param default: The default input text to be shown. (This can be edited
            by the user).
        :param accept_default: When `True`, automatically accept the default
            value without allowing the user to edit the input.
        :param pre_run: Callable, called at the start of `Application.run`.
        :param in_thread: Run the prompt in a background thread; block the
            current thread. This avoids interference with an event loop in the
            current thread. Like `Application.run(in_thread=True)`.
        This method will raise ``KeyboardInterrupt`` when control-c has been
        pressed (for abort) and ``EOFError`` when control-d has been pressed
        (for exit).
        """
        # NOTE: We used to create a backup of the PromptSession attributes and
        #       restore them after exiting the prompt. This code has been
        #       removed, because it was confusing and didn't really serve a use
        #       case. (People were changing `Application.editing_mode`
        #       dynamically and surprised that it was reset after every call.)
        # NOTE 2: YES, this is a lot of repetition below...
        #         However, it is very convenient for a user to accept all
        #         these parameters in this `prompt` method as well. We could
        #         use `locals()` and `setattr` to avoid the repetition, but
        #         then we lose the advantage of mypy and pyflakes to be able
        #         to verify the code.
        # For every argument that was explicitly passed, update the
        # corresponding session attribute. `None` means "keep as-is".
        if message is not None:
            self.message = message
        if editing_mode is not None:
            self.editing_mode = editing_mode
        if refresh_interval is not None:
            self.refresh_interval = refresh_interval
        if vi_mode:
            self.editing_mode = EditingMode.VI
        if lexer is not None:
            self.lexer = lexer
        if completer is not None:
            self.completer = completer
        if complete_in_thread is not None:
            self.complete_in_thread = complete_in_thread
        if is_password is not None:
            self.is_password = is_password
        if key_bindings is not None:
            self.key_bindings = key_bindings
        if bottom_toolbar is not None:
            self.bottom_toolbar = bottom_toolbar
        if style is not None:
            self.style = style
        if color_depth is not None:
            self.color_depth = color_depth
        if cursor is not None:
            self.cursor = cursor
        if include_default_pygments_style is not None:
            self.include_default_pygments_style = include_default_pygments_style
        if style_transformation is not None:
            self.style_transformation = style_transformation
        if swap_light_and_dark_colors is not None:
            self.swap_light_and_dark_colors = swap_light_and_dark_colors
        if rprompt is not None:
            self.rprompt = rprompt
        if multiline is not None:
            self.multiline = multiline
        if prompt_continuation is not None:
            self.prompt_continuation = prompt_continuation
        if wrap_lines is not None:
            self.wrap_lines = wrap_lines
        if enable_history_search is not None:
            self.enable_history_search = enable_history_search
        if search_ignore_case is not None:
            self.search_ignore_case = search_ignore_case
        if complete_while_typing is not None:
            self.complete_while_typing = complete_while_typing
        if validate_while_typing is not None:
            self.validate_while_typing = validate_while_typing
        if complete_style is not None:
            self.complete_style = complete_style
        if auto_suggest is not None:
            self.auto_suggest = auto_suggest
        if validator is not None:
            self.validator = validator
        if clipboard is not None:
            self.clipboard = clipboard
        if mouse_support is not None:
            self.mouse_support = mouse_support
        if input_processors is not None:
            self.input_processors = input_processors
        if placeholder is not None:
            self.placeholder = placeholder
        if reserve_space_for_menu is not None:
            self.reserve_space_for_menu = reserve_space_for_menu
        if enable_system_prompt is not None:
            self.enable_system_prompt = enable_system_prompt
        if enable_suspend is not None:
            self.enable_suspend = enable_suspend
        if enable_open_in_editor is not None:
            self.enable_open_in_editor = enable_open_in_editor
        if tempfile_suffix is not None:
            self.tempfile_suffix = tempfile_suffix
        if tempfile is not None:
            self.tempfile = tempfile
        if show_frame is not None:
            self.show_frame = show_frame
        self._add_pre_run_callables(pre_run, accept_default)
        self.default_buffer.reset(
            default if isinstance(default, Document) else Document(default)
        )
        self.app.refresh_interval = self.refresh_interval  # This is not reactive.
        # If we are using the default output, and have a dumb terminal. Use the
        # dumb prompt.
        if self._output is None and is_dumb_terminal():
            with self._dumb_prompt(self.message) as dump_app:
                return dump_app.run(in_thread=in_thread, handle_sigint=handle_sigint)
        return self.app.run(
            set_exception_handler=set_exception_handler,
            in_thread=in_thread,
            handle_sigint=handle_sigint,
            inputhook=inputhook,
        )
    @contextmanager
    def _dumb_prompt(self, message: AnyFormattedText = "") -> Iterator[Application[_T]]:
        """
        Create prompt `Application` for prompt function for dumb terminals.
        Dumb terminals have minimum rendering capabilities. We can only print
        text to the screen. We can't use colors, and we can't do cursor
        movements. The Emacs inferior shell is an example of a dumb terminal.
        We will show the prompt, and wait for the input. We still handle arrow
        keys, and all custom key bindings, but we don't really render the
        cursor movements. Instead we only print the typed character that's
        right before the cursor.
        """
        # Send prompt to output.
        self.output.write(fragment_list_to_text(to_formatted_text(self.message)))
        self.output.flush()
        # Key bindings for the dumb prompt: mostly the same as the full prompt.
        key_bindings: KeyBindingsBase = self._create_prompt_bindings()
        if self.key_bindings:
            key_bindings = merge_key_bindings([self.key_bindings, key_bindings])
        # Create and run application.
        # NOTE: we pass a `DummyOutput` to the Application, because all
        #       rendering is done manually here by writing to `self.output`;
        #       the renderer itself must not draw anything.
        application = cast(
            Application[_T],
            Application(
                input=self.input,
                output=DummyOutput(),
                layout=self.layout,
                key_bindings=key_bindings,
            ),
        )
        def on_text_changed(_: object) -> None:
            # Echo only the last character before the cursor; we can't redraw
            # the whole line on a dumb terminal.
            self.output.write(self.default_buffer.document.text_before_cursor[-1:])
            self.output.flush()
        self.default_buffer.on_text_changed += on_text_changed
        try:
            yield application
        finally:
            # Render line ending.
            self.output.write("\r\n")
            self.output.flush()
            # Unregister the echo handler so it doesn't fire for later
            # (non-dumb) prompts on the same session.
            self.default_buffer.on_text_changed -= on_text_changed
async def prompt_async(
self,
# When any of these arguments are passed, this value is overwritten
# in this PromptSession.
message: AnyFormattedText | None = None,
# `message` should go first, because people call it as
# positional argument.
*,
editing_mode: EditingMode | None = None,
refresh_interval: float | None = None,
vi_mode: bool | None = None,
lexer: Lexer | None = None,
completer: Completer | None = None,
complete_in_thread: bool | None = None,
is_password: bool | None = None,
key_bindings: KeyBindingsBase | None = None,
bottom_toolbar: AnyFormattedText | None = None,
style: BaseStyle | None = None,
color_depth: ColorDepth | None = None,
cursor: CursorShapeConfig | None = None,
include_default_pygments_style: FilterOrBool | None = None,
style_transformation: StyleTransformation | None = None,
swap_light_and_dark_colors: FilterOrBool | None = None,
rprompt: AnyFormattedText | None = None,
multiline: FilterOrBool | None = None,
prompt_continuation: PromptContinuationText | None = None,
wrap_lines: FilterOrBool | None = None,
enable_history_search: FilterOrBool | None = None,
search_ignore_case: FilterOrBool | None = None,
complete_while_typing: FilterOrBool | None = None,
validate_while_typing: FilterOrBool | None = None,
complete_style: CompleteStyle | None = None,
auto_suggest: AutoSuggest | None = None,
validator: Validator | None = None,
clipboard: Clipboard | None = None,
mouse_support: FilterOrBool | None = None,
input_processors: list[Processor] | None = None,
placeholder: AnyFormattedText | None = None,
reserve_space_for_menu: int | None = None,
enable_system_prompt: FilterOrBool | None = None,
enable_suspend: FilterOrBool | None = None,
enable_open_in_editor: FilterOrBool | None = None,
tempfile_suffix: str | Callable[[], str] | None = None,
tempfile: str | Callable[[], str] | None = None,
show_frame: FilterOrBool = False,
# Following arguments are specific to the current `prompt()` call.
default: str | Document = "",
accept_default: bool = False,
pre_run: Callable[[], None] | None = None,
set_exception_handler: bool = True,
handle_sigint: bool = True,
) -> _T:
if message is not None:
self.message = message
if editing_mode is not None:
self.editing_mode = editing_mode
if refresh_interval is not None:
self.refresh_interval = refresh_interval
if vi_mode:
self.editing_mode = EditingMode.VI
if lexer is not None:
self.lexer = lexer
if completer is not None:
self.completer = completer
if complete_in_thread is not None:
self.complete_in_thread = complete_in_thread
if is_password is not None:
self.is_password = is_password
if key_bindings is not None:
self.key_bindings = key_bindings
if bottom_toolbar is not None:
self.bottom_toolbar = bottom_toolbar
if style is not None:
self.style = style
if color_depth is not None:
self.color_depth = color_depth
if cursor is not None:
self.cursor = cursor
if include_default_pygments_style is not None:
self.include_default_pygments_style = include_default_pygments_style
if style_transformation is not None:
self.style_transformation = style_transformation
if swap_light_and_dark_colors is not None:
self.swap_light_and_dark_colors = swap_light_and_dark_colors
if rprompt is not None:
self.rprompt = rprompt
if multiline is not None:
self.multiline = multiline
if prompt_continuation is not None:
self.prompt_continuation = prompt_continuation
if wrap_lines is not None:
self.wrap_lines = wrap_lines
if enable_history_search is not None:
self.enable_history_search = enable_history_search
if search_ignore_case is not None:
self.search_ignore_case = search_ignore_case
if complete_while_typing is not None:
self.complete_while_typing = complete_while_typing
if validate_while_typing is not None:
self.validate_while_typing = validate_while_typing
if complete_style is not None:
self.complete_style = complete_style
if auto_suggest is not None:
self.auto_suggest = auto_suggest
if validator is not None:
self.validator = validator
if clipboard is not None:
self.clipboard = clipboard
if mouse_support is not None:
self.mouse_support = mouse_support
if input_processors is not None:
self.input_processors = input_processors
if placeholder is not None:
self.placeholder = placeholder
if reserve_space_for_menu is not None:
self.reserve_space_for_menu = reserve_space_for_menu
if enable_system_prompt is not None:
self.enable_system_prompt = enable_system_prompt
if enable_suspend is not None:
self.enable_suspend = enable_suspend
if enable_open_in_editor is not None:
self.enable_open_in_editor = enable_open_in_editor
if tempfile_suffix is not None:
self.tempfile_suffix = tempfile_suffix
if tempfile is not None:
self.tempfile = tempfile
if show_frame is not None:
self.show_frame = show_frame
self._add_pre_run_callables(pre_run, accept_default)
self.default_buffer.reset(
default if isinstance(default, Document) else Document(default)
)
self.app.refresh_interval = self.refresh_interval # This is not reactive.
# If we are using the default output, and have a dumb terminal. Use the
# dumb prompt.
if self._output is None and is_dumb_terminal():
with self._dumb_prompt(self.message) as dump_app:
return await dump_app.run_async(handle_sigint=handle_sigint)
return await self.app.run_async(
set_exception_handler=set_exception_handler, handle_sigint=handle_sigint
)
def _add_pre_run_callables(
self, pre_run: Callable[[], None] | None, accept_default: bool
) -> None:
def pre_run2() -> None:
if pre_run:
pre_run()
if accept_default:
# Validate and handle input. We use `call_from_executor` in
# order to run it "soon" (during the next iteration of the
# event loop), instead of right now. Otherwise, it won't
# display the default value.
get_running_loop().call_soon(self.default_buffer.validate_and_handle)
self.app.pre_run_callables.append(pre_run2)
    @property
    def editing_mode(self) -> EditingMode:
        # Proxy to the underlying Application, so changing the mode on the
        # session takes effect immediately in the running app.
        return self.app.editing_mode
    @editing_mode.setter
    def editing_mode(self, value: EditingMode) -> None:
        self.app.editing_mode = value
def _get_default_buffer_control_height(self) -> Dimension:
# If there is an autocompletion menu to be shown, make sure that our
# layout has at least a minimal height in order to display it.
if (
self.completer is not None
and self.complete_style != CompleteStyle.READLINE_LIKE
):
space = self.reserve_space_for_menu
else:
space = 0
if space and not get_app().is_done:
buff = self.default_buffer
# Reserve the space, either when there are completions, or when
# `complete_while_typing` is true and we expect completions very
# soon.
if buff.complete_while_typing() or buff.complete_state is not None:
return Dimension(min=space)
return Dimension()
def _get_prompt(self) -> StyleAndTextTuples:
return to_formatted_text(self.message, style="class:prompt")
def _get_continuation(
self, width: int, line_number: int, wrap_count: int
) -> StyleAndTextTuples:
"""
Insert the prompt continuation.
:param width: The width that was used for the prompt. (more or less can
be used.)
:param line_number:
:param wrap_count: Amount of times that the line has been wrapped.
"""
prompt_continuation = self.prompt_continuation
if callable(prompt_continuation):
continuation: AnyFormattedText = prompt_continuation(
width, line_number, wrap_count
)
else:
continuation = prompt_continuation
# When the continuation prompt is not given, choose the same width as
# the actual prompt.
if continuation is None and is_true(self.multiline):
continuation = " " * width
return to_formatted_text(continuation, style="class:prompt-continuation")
def _get_line_prefix(
self,
line_number: int,
wrap_count: int,
get_prompt_text_2: _StyleAndTextTuplesCallable,
) -> StyleAndTextTuples:
"""
Return whatever needs to be inserted before every line.
(the prompt, or a line continuation.)
"""
# First line: display the "arg" or the prompt.
if line_number == 0 and wrap_count == 0:
if not is_true(self.multiline) and get_app().key_processor.arg is not None:
return self._inline_arg()
else:
return get_prompt_text_2()
# For the next lines, display the appropriate continuation.
prompt_width = get_cwidth(fragment_list_to_text(get_prompt_text_2()))
return self._get_continuation(prompt_width, line_number, wrap_count)
def _get_arg_text(self) -> StyleAndTextTuples:
"'arg' toolbar, for in multiline mode."
arg = self.app.key_processor.arg
if arg is None:
# Should not happen because of the `has_arg` filter in the layout.
return []
if arg == "-":
arg = "-1"
return [("class:arg-toolbar", "Repeat: "), ("class:arg-toolbar.text", arg)]
def _inline_arg(self) -> StyleAndTextTuples:
"'arg' prefix, for in single line mode."
app = get_app()
if app.key_processor.arg is None:
return []
else:
arg = app.key_processor.arg
return [
("class:prompt.arg", "(arg: "),
("class:prompt.arg.text", str(arg)),
("class:prompt.arg", ") "),
]
    # Expose the Input and Output objects as attributes, mainly for
    # backward-compatibility.
    @property
    def input(self) -> Input:
        # Delegate to the underlying Application's input object.
        return self.app.input
    @property
    def output(self) -> Output:
        # Delegate to the underlying Application's output object.
        return self.app.output
def prompt(
    message: AnyFormattedText | None = None,
    *,
    history: History | None = None,
    editing_mode: EditingMode | None = None,
    refresh_interval: float | None = None,
    vi_mode: bool | None = None,
    lexer: Lexer | None = None,
    completer: Completer | None = None,
    complete_in_thread: bool | None = None,
    is_password: bool | None = None,
    key_bindings: KeyBindingsBase | None = None,
    bottom_toolbar: AnyFormattedText | None = None,
    style: BaseStyle | None = None,
    color_depth: ColorDepth | None = None,
    cursor: AnyCursorShapeConfig = None,
    include_default_pygments_style: FilterOrBool | None = None,
    style_transformation: StyleTransformation | None = None,
    swap_light_and_dark_colors: FilterOrBool | None = None,
    rprompt: AnyFormattedText | None = None,
    multiline: FilterOrBool | None = None,
    prompt_continuation: PromptContinuationText | None = None,
    wrap_lines: FilterOrBool | None = None,
    enable_history_search: FilterOrBool | None = None,
    search_ignore_case: FilterOrBool | None = None,
    complete_while_typing: FilterOrBool | None = None,
    validate_while_typing: FilterOrBool | None = None,
    complete_style: CompleteStyle | None = None,
    auto_suggest: AutoSuggest | None = None,
    validator: Validator | None = None,
    clipboard: Clipboard | None = None,
    mouse_support: FilterOrBool | None = None,
    input_processors: list[Processor] | None = None,
    placeholder: AnyFormattedText | None = None,
    reserve_space_for_menu: int | None = None,
    enable_system_prompt: FilterOrBool | None = None,
    enable_suspend: FilterOrBool | None = None,
    enable_open_in_editor: FilterOrBool | None = None,
    tempfile_suffix: str | Callable[[], str] | None = None,
    tempfile: str | Callable[[], str] | None = None,
    show_frame: FilterOrBool | None = None,
    # Following arguments are specific to the current `prompt()` call.
    default: str = "",
    accept_default: bool = False,
    pre_run: Callable[[], None] | None = None,
    set_exception_handler: bool = True,
    handle_sigint: bool = True,
    in_thread: bool = False,
    inputhook: InputHook | None = None,
) -> str:
    """
    The global `prompt` function. This will create a new `PromptSession`
    instance for every call.
    """
    # The history is the only attribute that has to be passed to the
    # `PromptSession`, it can't be passed into the `prompt()` method.
    session: PromptSession[str] = PromptSession(history=history)
    # All remaining arguments are forwarded verbatim to the session's
    # `prompt()` method.
    return session.prompt(
        message,
        editing_mode=editing_mode,
        refresh_interval=refresh_interval,
        vi_mode=vi_mode,
        lexer=lexer,
        completer=completer,
        complete_in_thread=complete_in_thread,
        is_password=is_password,
        key_bindings=key_bindings,
        bottom_toolbar=bottom_toolbar,
        style=style,
        color_depth=color_depth,
        cursor=cursor,
        include_default_pygments_style=include_default_pygments_style,
        style_transformation=style_transformation,
        swap_light_and_dark_colors=swap_light_and_dark_colors,
        rprompt=rprompt,
        multiline=multiline,
        prompt_continuation=prompt_continuation,
        wrap_lines=wrap_lines,
        enable_history_search=enable_history_search,
        search_ignore_case=search_ignore_case,
        complete_while_typing=complete_while_typing,
        validate_while_typing=validate_while_typing,
        complete_style=complete_style,
        auto_suggest=auto_suggest,
        validator=validator,
        clipboard=clipboard,
        mouse_support=mouse_support,
        input_processors=input_processors,
        placeholder=placeholder,
        reserve_space_for_menu=reserve_space_for_menu,
        enable_system_prompt=enable_system_prompt,
        enable_suspend=enable_suspend,
        enable_open_in_editor=enable_open_in_editor,
        tempfile_suffix=tempfile_suffix,
        tempfile=tempfile,
        show_frame=show_frame,
        default=default,
        accept_default=accept_default,
        pre_run=pre_run,
        set_exception_handler=set_exception_handler,
        handle_sigint=handle_sigint,
        in_thread=in_thread,
        inputhook=inputhook,
    )
# Reuse the method's docstring for the module-level convenience function.
prompt.__doc__ = PromptSession.prompt.__doc__
def create_confirm_session(
    message: AnyFormattedText, suffix: str = " (y/n) "
) -> PromptSession[bool]:
    """
    Create a `PromptSession` object for the 'confirm' function.
    """
    bindings = KeyBindings()
    @bindings.add("y")
    @bindings.add("Y")
    def _answer_yes(event: E) -> None:
        # Echo the choice into the buffer, then exit with a positive result.
        session.default_buffer.text = "y"
        event.app.exit(result=True)
    @bindings.add("n")
    @bindings.add("N")
    def _answer_no(event: E) -> None:
        session.default_buffer.text = "n"
        event.app.exit(result=False)
    @bindings.add(Keys.Any)
    def _ignore(event: E) -> None:
        "Disallow inserting other text."
    full_message = merge_formatted_text([message, suffix])
    session: PromptSession[bool] = PromptSession(
        full_message, key_bindings=bindings
    )
    return session
def confirm(message: AnyFormattedText = "Confirm?", suffix: str = " (y/n) ") -> bool:
    """
    Display a confirmation prompt that returns True/False.
    """
    return create_confirm_session(message, suffix).prompt()
| PromptSession |
python | django__django | tests/model_enums/tests.py | {
"start": 7990,
"end": 8126
} | class ____(frozenset, models.Choices):
A = {1, 2}
B = {2, 3}
UNION = A | B
DIFFERENCE = A - B
INTERSECTION = A & B
| Set |
python | kamyu104__LeetCode-Solutions | Python/check-if-point-is-reachable.py | {
"start": 58,
"end": 423
} | class ____(object):
def isReachable(self, targetX, targetY):
"""
:type targetX: int
:type targetY: int
:rtype: bool
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
g = gcd(targetX, targetY)
return g == (g&~(g-1)) # co-prime other than factor 2
| Solution |
python | joke2k__faker | tests/providers/test_barcode.py | {
"start": 10232,
"end": 10472
} | class ____(_LocaleNorthAmericaMixin):
"""Tests fr_CA barcode provider"""
num_samples = 1000
@staticmethod
def get_provider_class():
from faker.providers.barcode.fr_CA import Provider
return Provider
| TestFrCa |
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 14664,
"end": 15072
} | class ____(TypedDict):
id: str
title: str
dateCreated: str
createdBy: UserSerializerResponse
environment: list[str]
filters: DashboardFilters
lastVisited: str | None
widgetDisplay: list[str]
widgetPreview: list[dict[str, str]]
permissions: DashboardPermissionsResponse | None
isFavorited: bool
projects: list[int]
prebuiltId: int | None
| DashboardListResponse |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/padded_batch_test.py | {
"start": 1649,
"end": 16253
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
count=[32, 34],
padded_shapes=[[None], [25]],
drop_remainder=[True, False])))
def testPaddedBatchDataset(self, count, padded_shapes, drop_remainder):
seq_lens = np.random.randint(20, size=(count,)).astype(np.int32)
batch_size = 4
dataset = dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=batch_size,
drop_remainder=drop_remainder,
padded_shapes=padded_shapes)
num_full_batches = len(seq_lens) // batch_size
get_next = self.getNext(dataset)
for i in range(num_full_batches):
result = self.evaluate(get_next())
padded_len = padded_shapes[0]
if padded_len is None or padded_len == -1:
padded_len = np.max(result) if result.size > 0 else 0
self.assertEqual((batch_size, padded_len), result.shape)
for j in range(batch_size):
seq_len = seq_lens[(i * batch_size) + j]
self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[j, seq_len:], [0] * (padded_len - seq_len))
if not drop_remainder and len(seq_lens) % batch_size > 0:
result = self.evaluate(get_next())
padded_len = padded_shapes[0]
if padded_len is None or padded_len == -1:
padded_len = np.max(result) if result.size > 0 else 0
self.assertEqual((len(seq_lens) % batch_size, padded_len), result.shape)
for j in range(len(seq_lens) % batch_size):
seq_len = seq_lens[num_full_batches * batch_size + j]
self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[j, seq_len:], [0] * (padded_len - seq_len))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShortPadding(self):
dataset = (
dataset_ops.Dataset.from_tensor_slices(
[6, 5, 5, 5, 5]).map(lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=4, padded_shapes=[5]))
self.assertDatasetProduces(
dataset, expected_error=(errors.DataLossError, ''))
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchEmptyTensors(self):
dataset = (
dataset_ops.Dataset.from_tensor_slices(
[0, 0, 0, 0]).map(lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=4, padded_shapes=[-1]))
self.assertDatasetProduces(dataset, expected_output=[[[], [], [], []]])
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchWithDifferetElementTypes(self):
dataset = dataset_ops.Dataset.from_tensor_slices(
([0, 1, 2, 3], ['a', 'b', 'c', 'd']))
dataset = dataset.padded_batch(2)
self.assertDatasetProduces(dataset, [([0, 1], ['a', 'b']),
([2, 3], ['c', 'd'])])
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchWithEmptyTuple(self):
dataset = dataset_ops.Dataset.from_tensor_slices(([0, 1, 2, 3], ()))
dataset = dataset.padded_batch(2)
self.assertDatasetProduces(dataset, [([0, 1], ()), ([2, 3], ())])
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchWithNoneElement(self):
dataset = dataset_ops.Dataset.from_tensor_slices(([0, 1, 2, 3], None))
with self.assertRaises(TypeError):
dataset = dataset.padded_batch(2)
@combinations.generate(test_base.default_test_combinations())
def testDefaultPaddedShapes(self):
def fill(x):
return array_ops.fill([x], x)
dataset = (
dataset_ops.Dataset.from_tensor_slices(
[1, 2, 3, 4]).map(fill).padded_batch(batch_size=2))
self.assertDatasetProduces(
dataset,
expected_output=[[[1, 0], [2, 2]], [[3, 3, 3, 0], [4, 4, 4, 4]]])
@combinations.generate(test_base.default_test_combinations())
def testNestedDefaultPaddedShapes(self):
def fill_tuple(x):
return (x, array_ops.fill([x], x))
dataset = (
dataset_ops.Dataset.from_tensor_slices(
[1, 2, 3, 4]).map(fill_tuple).padded_batch(batch_size=2))
self.assertDatasetProduces(
dataset,
expected_output=[([1, 2], [[1, 0], [2, 2]]),
([3, 4], [[3, 3, 3, 0], [4, 4, 4, 4]])])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
padding_values=[(-1, '<end>', {'structure': ''}),
(-1, '<end>', None)])))
def testPaddedBatchDatasetNonDefaultPadding(self, padding_values):
def fill_tuple(x):
filled = array_ops.fill([x], x)
return (filled, string_ops.as_string(filled), {
'structure': string_ops.as_string(filled)
})
random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
dataset = (
dataset_ops.Dataset.from_tensor_slices(random_seq_lens).map(fill_tuple)
.padded_batch(
4, padded_shapes=([-1], [-1], {'structure': [-1]}),
padding_values=padding_values))
get_next = self.getNext(dataset)
for i in range(8):
result = self.evaluate(get_next())
padded_len = np.max(result[0])
self.assertEqual((4, padded_len), result[0].shape)
self.assertEqual((4, padded_len), result[1].shape)
self.assertEqual((4, padded_len), result[2]['structure'].shape)
for j in range(4):
seq_len = random_seq_lens[(i * 4) + j]
self.assertAllEqual(result[0][j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[0][j, seq_len:],
[-1] * (padded_len - seq_len))
self.assertAllEqual(result[1][j, :seq_len],
[compat.as_bytes(str(seq_len))] * seq_len)
self.assertAllEqual(result[1][j, seq_len:],
[b'<end>'] * (padded_len - seq_len))
self.assertAllEqual(result[2]['structure'][j, :seq_len],
[compat.as_bytes(str(seq_len))] * seq_len)
self.assertAllEqual(result[2]['structure'][j, seq_len:],
[b''] * (padded_len - seq_len))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchDatasetUnicode(self):
# See GitHub issue 16149
def generator():
data = [[u'Простой', u'тест', u'юникода'],
[u'никогда', u'не', u'бывает', u'простым']]
for seq in data:
yield seq, [0, 1, 2, 3]
dataset = dataset_ops.Dataset.from_generator(
generator, (dtypes.string, dtypes.int32),
(tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])))
padded_dataset = dataset.padded_batch(
2, padded_shapes=([None], [None]), padding_values=('', 0))
next_element = self.getNext(padded_dataset)
self.evaluate(next_element())
@combinations.generate(test_base.graph_only_combinations())
def testPaddedBatchDatasetShapeSpecifications(self):
int_placeholder = array_ops.placeholder(dtypes.int32)
float_placeholder = array_ops.placeholder(dtypes.float32)
string_placeholder = array_ops.placeholder(dtypes.string)
input_dataset = dataset_ops.Dataset.from_tensors(
(int_placeholder, float_placeholder, string_placeholder))
# Test different ways of specifying the `padded_shapes` argument.
dynamic_padding_from_tensor_shapes = input_dataset.padded_batch(
32,
padded_shapes=(tensor_shape.TensorShape([None]),
tensor_shape.TensorShape([None, None]),
tensor_shape.TensorShape([37])))
dynamic_padding_from_lists = input_dataset.padded_batch(
32, padded_shapes=([None], [None, None], [37]))
dynamic_padding_from_lists_with_minus_one = input_dataset.padded_batch(
32, padded_shapes=([-1], [-1, -1], [37]))
dynamic_padding_from_tensors = input_dataset.padded_batch(
32,
padded_shapes=(constant_op.constant([-1], dtype=dtypes.int64),
constant_op.constant([-1, -1], dtype=dtypes.int64),
constant_op.constant([37], dtype=dtypes.int64)))
for dataset in [
dynamic_padding_from_tensor_shapes, dynamic_padding_from_lists,
dynamic_padding_from_lists_with_minus_one, dynamic_padding_from_tensors
]:
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual([None, None], dataset_output_shapes[0].as_list())
self.assertEqual([None, None, None], dataset_output_shapes[1].as_list())
self.assertEqual([None, 37], dataset_output_shapes[2].as_list())
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchSparseError(self):
st = sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=([42]), dense_shape=[1, 1])
with self.assertRaisesRegex(
TypeError, r'`padded_batch` is only supported for '
r'datasets that produce tensor elements but type spec of elements in '
r'the input dataset is not a subclass of TensorSpec: '
r'`SparseTensorSpec.*`\.$'):
_ = dataset_ops.Dataset.from_tensors(st).repeat(10).padded_batch(10)
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchRaggedError(self):
  """`padded_batch` must reject datasets of ragged tensors with a TypeError."""
  rt = ragged_tensor_value.RaggedTensorValue(
      np.array([0, 42]), np.array([0, 2], dtype=np.int64))
  with self.assertRaisesRegex(
      TypeError, r'`padded_batch` is only supported for '
      r'datasets that produce tensor elements but type spec of elements in '
      r'the input dataset is not a subclass of TensorSpec: '
      r'`RaggedTensorSpec.*`\.$'):
    _ = dataset_ops.Dataset.from_tensors(rt).repeat(10).padded_batch(10)
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchDatasetsError(self):
  """`padded_batch` must reject datasets whose elements are datasets."""
  ds = dataset_ops.Dataset.range(10).map(
      lambda x: dataset_ops.Dataset.range(1))
  with self.assertRaisesRegex(
      TypeError, r'`padded_batch` is not supported for datasets of datasets'):
    _ = ds.padded_batch(3)
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShapeErrorWrongRank(self):
  """A rank-1 padded shape is incompatible with scalar elements."""
  with self.assertRaisesRegex(
      ValueError, r'The padded shape \(1,\) is not compatible with the '
      r'shape \(\) of the corresponding input component.'):
    _ = dataset_ops.Dataset.range(10).padded_batch(5, padded_shapes=[1])
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShapeErrorTooSmall(self):
  """A padded shape smaller than the element shape must be rejected."""
  with self.assertRaisesRegex(
      ValueError, r'The padded shape \(1,\) is not compatible with the '
      r'shape \(3,\) of the corresponding input component.'):
    _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
        5, padded_shapes=[1])
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShapeErrorShapeNotRank1(self):
  """Each padded shape must be expressible as a rank-1 `tf.int64` vector."""
  with self.assertRaisesRegex(
      ValueError, r'Padded shape .* must be a `tf.int64` vector tensor, '
      r'but its shape was \(2, 2\).'):
    _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
        5, padded_shapes=[[1, 1], [1, 1]])
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShapeErrorShapeNotInt(self):
  """A float-typed padded shape tensor must be rejected with a TypeError."""
  with self.assertRaisesRegex(
      TypeError, r'Padded shape .* must be a `tf.int64` vector tensor, '
      r'but its element type was float32.'):
    _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
        5, padded_shapes=constant_op.constant([1.5, 2., 3.]))
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShapeErrorWrongRankFromTensor(self):
  """Rank mismatch is detected even when the shape comes from a constant tensor."""
  with self.assertRaisesRegex(
      ValueError, r'The padded shape \(1,\) is not compatible with the '
      r'shape \(\) of the corresponding input component.'):
    shape_as_tensor = constant_op.constant([1], dtype=dtypes.int64)
    _ = dataset_ops.Dataset.range(10).padded_batch(
        5, padded_shapes=shape_as_tensor)
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchShapeErrorDefaultShapeWithUnknownRank(self):
  """Default `padded_shapes` cannot be inferred for elements of unknown rank."""
  with self.assertRaisesRegex(ValueError, r'`padded_shapes`.*unknown rank'):
    # from_generator without output_shapes yields elements of unknown rank.
    ds = dataset_ops.Dataset.from_generator(
        lambda: iter([1, 2, 3]), output_types=dtypes.int32)
    ds.padded_batch(2)
@combinations.generate(test_base.graph_only_combinations())
def testPaddedBatchShapeErrorPlaceholder(self):
  """A placeholder-valued padded shape with the wrong rank must be rejected.

  Graph-only: the regex accepts both '?' (TF1 repr) and 'None' for the
  unknown dimensions of the placeholder.
  """
  with self.assertRaisesRegex(
      ValueError,
      r'The padded shape \((\?|None), (\?|None)\) is not compatible with the '
      r'shape \(\) of the corresponding input component.'):
    shape_as_tensor = array_ops.placeholder(dtypes.int64, shape=[2])
    _ = dataset_ops.Dataset.range(10).padded_batch(
        5, padded_shapes=shape_as_tensor)
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchBfloat16(self):
  """`padded_batch` works on bfloat16 elements (default padding value)."""
  ds = dataset_ops.Dataset.range(5)
  ds = ds.map(lambda x: math_ops.cast(x, dtypes.bfloat16))
  ds = ds.padded_batch(10)
  self.assertDatasetProduces(
      ds, expected_output=[[0.0, 1.0, 2.0, 3.0, 4.0]])
@combinations.generate(test_base.default_test_combinations())
def testDefaultPaddedValueShapes(self):
  """A single scalar `padding_values` is broadcast to every component."""

  def fill(x):
    # Element i is a vector of length i filled with the value i.
    return array_ops.fill([x], x)

  dataset = dataset_ops.Dataset.zip(
      (dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4]).map(fill),
       dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4]).map(fill)))
  dataset = dataset.padded_batch(batch_size=2, padding_values=-1)
  self.assertDatasetProduces(
      dataset,
      expected_output=[([[1, -1], [2, 2]], [[1, -1], [2, 2]]),
                       ([[3, 3, 3, -1], [4, 4, 4, 4]], [[3, 3, 3, -1],
                                                        [4, 4, 4, 4]])])
@combinations.generate(test_base.default_test_combinations())
def testName(self):
  """The `name` argument is accepted and does not change the output."""
  dataset = dataset_ops.Dataset.range(5).padded_batch(5, name='padded_batch')
  self.assertDatasetProduces(dataset, [list(range(5))])
| PaddedBatchTest |
python | walkccc__LeetCode | solutions/3285. Find Indices of Stable Mountains/3285.py | {
"start": 0,
"end": 180
class Solution:
    """LeetCode 3285: Find Indices of Stable Mountains."""

    def stableMountains(self, height: list[int], threshold: int) -> list[int]:
        """Return the indices i >= 1 whose left neighbor exceeds `threshold`.

        Mountain 0 can never be stable because it has no left neighbor, so
        the scan starts at index 1.
        """
        return [i for i in range(1, len(height))
                if height[i - 1] > threshold]
| Solution |
python | sqlalchemy__sqlalchemy | test/sql/test_ddlemit.py | {
"start": 646,
"end": 21899
class EmitDDLTest(fixtures.TestBase):
    """Unit tests for SchemaGenerator/SchemaDropper ``checkfirst`` behavior.

    A mocked connection/dialect records which existence checks
    (``has_table``/``has_sequence``/``has_index``) were made and which DDL
    elements were executed, so each test can assert both the emitted DDL
    and the checks that gated it, for the various ``CheckFirst`` flag
    combinations.
    """

    def _mock_connection(self, item_exists):
        # item_exists(name) decides what the mocked dialect reports as
        # already present in the database.
        def has_item(connection, name, schema):
            return item_exists(name)

        def has_index(connection, tablename, idxname, schema):
            return item_exists(idxname)

        return Mock(
            dialect=Mock(
                supports_sequences=True,
                has_table=Mock(side_effect=has_item),
                has_sequence=Mock(side_effect=has_item),
                has_index=Mock(side_effect=has_index),
                supports_comments=True,
                inline_comments=False,
            ),
            _schema_translate_map=None,
        )

    def _mock_create_fixture(
        self, checkfirst, tables, item_exists=lambda item: False
    ):
        connection = self._mock_connection(item_exists)
        return SchemaGenerator(
            connection.dialect,
            connection,
            checkfirst=checkfirst,
            tables=tables,
        )

    def _mock_drop_fixture(
        self, checkfirst, tables, item_exists=lambda item: True
    ):
        connection = self._mock_connection(item_exists)
        return SchemaDropper(
            connection.dialect,
            connection,
            checkfirst=checkfirst,
            tables=tables,
        )

    def _table_fixture(self):
        m = MetaData()
        return (m,) + tuple(
            Table("t%d" % i, m, Column("x", Integer)) for i in range(1, 6)
        )

    def _table_and_view_fixture(self):
        m = MetaData()
        tables = [
            Table("t%d" % i, m, Column("x", Integer)) for i in range(1, 4)
        ]
        t1, t2, t3 = tables
        views = [
            CreateView(select(t1), "v1", metadata=m).table,
            CreateView(select(t3), "v2", metadata=m).table,
        ]
        return (m,) + tuple(tables) + tuple(views)

    def _use_alter_fixture_one(self):
        # Mutually-referencing FKs: requires ALTER-based constraint creation.
        m = MetaData()
        t1 = Table(
            "t1",
            m,
            Column("id", Integer, primary_key=True),
            Column("t2id", Integer, ForeignKey("t2.id")),
        )
        t2 = Table(
            "t2",
            m,
            Column("id", Integer, primary_key=True),
            Column("t1id", Integer, ForeignKey("t1.id")),
        )
        return m, t1, t2

    def _fk_fixture_one(self):
        m = MetaData()
        t1 = Table(
            "t1",
            m,
            Column("id", Integer, primary_key=True),
            Column("t2id", Integer, ForeignKey("t2.id")),
        )
        t2 = Table("t2", m, Column("id", Integer, primary_key=True))
        return m, t1, t2

    def _table_index_fixture(self):
        m = MetaData()
        t1 = Table("t1", m, Column("x", Integer), Column("y", Integer))
        i1 = Index("my_idx", t1.c.x, t1.c.y)
        return m, t1, i1

    def _table_seq_fixture(self):
        m = MetaData()
        s1 = Sequence("s1")
        s2 = Sequence("s2")
        t1 = Table("t1", m, Column("x", Integer, s1, primary_key=True))
        t2 = Table("t2", m, Column("x", Integer, s2, primary_key=True))
        return m, t1, t2, s1, s2

    def _table_comment_fixture(self):
        m = MetaData()
        c1 = Column("id", Integer, comment="c1")
        t1 = Table("t1", m, c1, comment="t1")
        return m, t1, c1

    def test_comment(self):
        m, t1, c1 = self._table_comment_fixture()

        generator = self._mock_create_fixture(
            False, [t1], item_exists=lambda t: t not in ("t1",)
        )

        # t1 appears twice: once for CREATE TABLE, once for SET COMMENT.
        self._assert_create_comment([t1, t1, c1], generator, m)

    _true_seq = testing.combinations(
        True,
        CheckFirst.ALL,
        CheckFirst.SEQUENCES | CheckFirst.TABLES,
        argnames="checkfirst",
    )

    @_true_seq
    def test_create_seq_checkfirst(self, checkfirst):
        m, t1, t2, s1, s2 = self._table_seq_fixture()

        generator = self._mock_create_fixture(
            checkfirst, [t1, t2], item_exists=lambda t: t not in ("t1", "s1")
        )

        self._assert_create([t1, s1], generator, m)

    @_true_seq
    def test_drop_seq_checkfirst(self, checkfirst):
        m, t1, t2, s1, s2 = self._table_seq_fixture()

        generator = self._mock_drop_fixture(
            checkfirst, [t1, t2], item_exists=lambda t: t in ("t1", "s1")
        )

        self._assert_drop([t1, s1], generator, m)

    def test_create_table_index_checkfirst(self):
        """create table that doesn't exist should not require a check
        on the index"""

        m, t1, i1 = self._table_index_fixture()

        def exists(name):
            if name == "my_idx":
                # any index-existence check would be a bug here
                raise NotImplementedError()
            else:
                return False

        generator = self._mock_create_fixture(True, [t1], item_exists=exists)
        self._assert_create([t1, i1], generator, t1)

    def test_create_table_exists_index_checkfirst(self):
        """for the moment, if the table *does* exist, we are not checking
        for the index.  this can possibly be changed."""

        m, t1, i1 = self._table_index_fixture()

        def exists(name):
            if name == "my_idx":
                raise NotImplementedError()
            else:
                return True

        generator = self._mock_create_fixture(True, [t1], item_exists=exists)
        # nothing is created
        self._assert_create([], generator, t1)

    def test_drop_table_index_checkfirst(self):
        m, t1, i1 = self._table_index_fixture()

        def exists(name):
            if name == "my_idx":
                raise NotImplementedError()
            else:
                return True

        generator = self._mock_drop_fixture(True, [t1], item_exists=exists)
        self._assert_drop_tables([t1], generator, t1, True)

    _true_index = testing.combinations(
        True, CheckFirst.ALL, CheckFirst.INDEXES, argnames="checkfirst"
    )

    @_true_index
    def test_create_index_checkfirst_exists(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_create_fixture(
            checkfirst, [i1], item_exists=lambda idx: True
        )
        self._assert_create_index([], generator, i1, checkfirst)

    @_true_index
    def test_create_index_nocheck_exists(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_create_fixture(
            checkfirst, [i1], item_exists=lambda idx: False
        )
        self._assert_create_index([i1], generator, i1, checkfirst)

    _false_index = testing.combinations(
        False,
        CheckFirst.NONE,
        CheckFirst.TABLES,
        CheckFirst.SEQUENCES,
        CheckFirst.TYPES,
        argnames="checkfirst",
    )

    @_false_index
    def test_create_index_nocheck_doesnt_exist(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_create_fixture(
            checkfirst, [i1], item_exists=lambda idx: False
        )
        self._assert_create_index([i1], generator, i1, checkfirst)

    @_false_index
    def test_drop_index_checkfirst_exists(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_drop_fixture(
            checkfirst, [i1], item_exists=lambda idx: True
        )
        self._assert_drop_index([i1], generator, i1, checkfirst)

    @_true_index
    def test_drop_index_checkfirst_doesnt_exist(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_drop_fixture(
            checkfirst, [i1], item_exists=lambda idx: False
        )
        self._assert_drop_index([], generator, i1, checkfirst)

    @_false_index
    def test_drop_index_nocheck_exists(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_drop_fixture(
            checkfirst, [i1], item_exists=lambda idx: True
        )
        self._assert_drop_index([i1], generator, i1, checkfirst)

    @_false_index
    def test_drop_index_nocheck_doesnt_exist(self, checkfirst):
        m, t1, i1 = self._table_index_fixture()

        generator = self._mock_drop_fixture(
            checkfirst, [i1], item_exists=lambda idx: False
        )
        self._assert_drop_index([i1], generator, i1, checkfirst)

    _true_table = testing.combinations(
        True, CheckFirst.ALL, CheckFirst.TABLES, argnames="checkfirst"
    )

    @_true_table
    def test_create_collection_checkfirst(self, checkfirst):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_create_fixture(
            checkfirst,
            [t2, t3, t4],
            item_exists=lambda t: t not in ("t2", "t4"),
        )

        self._assert_create_tables([t2, t4], generator, m, checkfirst)

    def test_drop_collection_checkfirst(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_drop_fixture(
            True, [t2, t3, t4], item_exists=lambda t: t in ("t2", "t4")
        )

        self._assert_drop_tables([t2, t4], generator, m, True)

    _false_table = testing.combinations(
        False,
        CheckFirst.NONE,
        CheckFirst.INDEXES,
        CheckFirst.SEQUENCES,
        CheckFirst.TYPES,
        argnames="checkfirst",
    )

    @_false_table
    def test_create_collection_nocheck(self, checkfirst):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_create_fixture(
            checkfirst,
            [t2, t3, t4],
            item_exists=lambda t: t not in ("t2", "t4"),
        )

        self._assert_create_tables([t2, t3, t4], generator, m, checkfirst)

    def test_create_empty_collection(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_create_fixture(
            True, [], item_exists=lambda t: t not in ("t2", "t4")
        )

        self._assert_create_tables([], generator, m, True)

    def test_drop_empty_collection(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_drop_fixture(
            True, [], item_exists=lambda t: t in ("t2", "t4")
        )

        self._assert_drop_tables([], generator, m, True)

    def test_drop_collection_nocheck(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_drop_fixture(
            False, [t2, t3, t4], item_exists=lambda t: t in ("t2", "t4")
        )

        self._assert_drop_tables([t2, t3, t4], generator, m, False)

    def test_create_metadata_checkfirst(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_create_fixture(
            True, None, item_exists=lambda t: t not in ("t2", "t4")
        )

        self._assert_create_tables([t2, t4], generator, m, True)

    def test_drop_metadata_checkfirst(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_drop_fixture(
            True, None, item_exists=lambda t: t in ("t2", "t4")
        )

        self._assert_drop_tables([t2, t4], generator, m, True)

    def test_create_metadata_nocheck(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_create_fixture(
            False, None, item_exists=lambda t: t not in ("t2", "t4")
        )

        self._assert_create_tables([t1, t2, t3, t4, t5], generator, m, False)

    def test_drop_metadata_nocheck(self):
        m, t1, t2, t3, t4, t5 = self._table_fixture()

        generator = self._mock_drop_fixture(
            False, None, item_exists=lambda t: t in ("t2", "t4")
        )

        self._assert_drop_tables([t1, t2, t3, t4, t5], generator, m, False)

    def test_create_metadata_wviews_checkfirst(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_create_fixture(
            True, None, item_exists=lambda t: t not in ("t2", "v2")
        )

        self._assert_create_tables([t2, v2], generator, m, True)

    def test_drop_metadata_wviews_checkfirst(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_drop_fixture(
            True, None, item_exists=lambda t: t in ("t2", "v2")
        )

        self._assert_drop_tables([t2, v2], generator, m, True)

    def test_create_metadata_wviews_check_tables_only(self):
        # only tables are gated by existence checks; views are emitted
        # unconditionally
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_create_fixture(
            CheckFirst.TABLES,
            None,
            item_exists=lambda t: t not in ("t2", "v2"),
        )

        self._assert_create_tables(
            [t2, v1, v2], generator, m, CheckFirst.TABLES
        )

    def test_drop_metadata_wviews_check_tables_only(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_drop_fixture(
            CheckFirst.TABLES, None, item_exists=lambda t: t in ("t2", "v2")
        )

        self._assert_drop_tables([t2, v1, v2], generator, m, CheckFirst.TABLES)

    def test_create_metadata_wviews_check_views_only(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_create_fixture(
            CheckFirst.VIEWS, None, item_exists=lambda t: t not in ("t2", "v2")
        )

        self._assert_create_tables(
            [t1, t2, t3, v2], generator, m, CheckFirst.VIEWS
        )

    def test_drop_metadata_wviews_check_views_only(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_drop_fixture(
            CheckFirst.VIEWS, None, item_exists=lambda t: t in ("t2", "v2")
        )

        self._assert_drop_tables(
            [t1, t2, t3, v2], generator, m, CheckFirst.VIEWS
        )

    def test_create_metadata_wviews_nocheck(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_create_fixture(
            False, None, item_exists=lambda t: t not in ("t2", "v2")
        )

        self._assert_create_tables([t1, t2, t3, v1, v2], generator, m, False)

    def test_drop_metadata_wviews_nocheck(self):
        m, t1, t2, t3, v1, v2 = self._table_and_view_fixture()

        generator = self._mock_drop_fixture(
            False, None, item_exists=lambda t: t in ("t2", "v2")
        )

        self._assert_drop_tables([t1, t2, t3, v1, v2], generator, m, False)

    def test_create_metadata_auto_alter_fk(self):
        m, t1, t2 = self._use_alter_fixture_one()

        generator = self._mock_create_fixture(False, [t1, t2])

        self._assert_create_w_alter(
            [t1, t2]
            + list(t1.foreign_key_constraints)
            + list(t2.foreign_key_constraints),
            generator,
            m,
        )

    def test_create_metadata_inline_fk(self):
        m, t1, t2 = self._fk_fixture_one()

        generator = self._mock_create_fixture(False, [t1, t2])

        self._assert_create_w_alter(
            [t1, t2]
            + list(t1.foreign_key_constraints)
            + list(t2.foreign_key_constraints),
            generator,
            m,
        )

    def _assert_create_tables(self, elements, generator, argument, checkfirst):
        """Assert the emitted CREATE DDL and the existence checks it made."""
        self._assert_ddl(
            (schema.CreateTable, schema.CreateView),
            elements,
            generator,
            argument,
        )

        tables = []
        if CheckFirst(checkfirst) & CheckFirst.TABLES:
            if generator.tables is not None:
                tables.extend([t for t in generator.tables if not t.is_view])
            elif isinstance(argument, MetaData):
                tables.extend(
                    [t for t in argument.tables.values() if not t.is_view]
                )
            else:
                assert False, "don't know what tables we are checking"

        if CheckFirst(checkfirst) & CheckFirst.VIEWS:
            if generator.tables is not None:
                tables.extend([t for t in generator.tables if t.is_view])
            elif isinstance(argument, MetaData):
                tables.extend(
                    [t for t in argument.tables.values() if t.is_view]
                )
            else:
                assert False, "don't know what views we are checking"

        if tables:
            eq_(
                generator.dialect.has_table.mock_calls,
                [
                    mock.call(mock.ANY, tablename, schema=mock.ANY)
                    for tablename in [t.name for t in tables]
                ],
            )
        else:
            # NOTE(review): this branch asserts on has_index, not has_table;
            # presumably intentional (no checks of any kind) — confirm.
            eq_(
                generator.dialect.has_index.mock_calls,
                [],
            )

    def _assert_drop_tables(self, elements, generator, argument, checkfirst):
        """Assert the emitted DROP DDL and the existence checks it made."""
        self._assert_ddl(
            (schema.DropTable, schema.DropView), elements, generator, argument
        )

        tables = []
        if CheckFirst(checkfirst) & CheckFirst.TABLES:
            if generator.tables is not None:
                tables.extend([t for t in generator.tables if not t.is_view])
            elif isinstance(argument, MetaData):
                tables.extend(
                    [t for t in argument.tables.values() if not t.is_view]
                )
            else:
                assert False, "don't know what tables we are checking"

        if CheckFirst(checkfirst) & CheckFirst.VIEWS:
            if generator.tables is not None:
                tables.extend([t for t in generator.tables if t.is_view])
            elif isinstance(argument, MetaData):
                tables.extend(
                    [t for t in argument.tables.values() if t.is_view]
                )
            else:
                assert False, "don't know what views we are checking"

        if tables:
            eq_(
                generator.dialect.has_table.mock_calls,
                [
                    mock.call(mock.ANY, tablename, schema=mock.ANY)
                    for tablename in [t.name for t in tables]
                ],
            )
        else:
            eq_(
                generator.dialect.has_index.mock_calls,
                [],
            )

    def _assert_create(self, elements, generator, argument):
        self._assert_ddl(
            (schema.CreateTable, schema.CreateSequence, schema.CreateIndex),
            elements,
            generator,
            argument,
        )

    def _assert_drop(self, elements, generator, argument):
        self._assert_ddl(
            (schema.DropTable, schema.DropSequence),
            elements,
            generator,
            argument,
        )

    def _assert_create_w_alter(self, elements, generator, argument):
        self._assert_ddl(
            (schema.CreateTable, schema.CreateSequence, schema.AddConstraint),
            elements,
            generator,
            argument,
        )

    def _assert_drop_w_alter(self, elements, generator, argument):
        self._assert_ddl(
            (schema.DropTable, schema.DropSequence, schema.DropConstraint),
            elements,
            generator,
            argument,
        )

    def _assert_create_comment(self, elements, generator, argument):
        self._assert_ddl(
            (
                schema.CreateTable,
                schema.SetTableComment,
                schema.SetColumnComment,
            ),
            elements,
            generator,
            argument,
        )

    def _assert_create_index(self, elements, generator, argument, checkfirst):
        self._assert_ddl((schema.CreateIndex,), elements, generator, argument)

        if CheckFirst(checkfirst) & CheckFirst.INDEXES:
            tablename = argument.table.name
            indexname = argument.name
            eq_(
                generator.dialect.has_index.mock_calls,
                [mock.call(mock.ANY, tablename, indexname, schema=mock.ANY)],
            )
        else:
            eq_(
                generator.dialect.has_index.mock_calls,
                [],
            )

    def _assert_drop_index(self, elements, generator, argument, checkfirst):
        self._assert_ddl((schema.DropIndex,), elements, generator, argument)

        if CheckFirst(checkfirst) & CheckFirst.INDEXES:
            tablename = argument.table.name
            indexname = argument.name
            eq_(
                generator.dialect.has_index.mock_calls,
                [mock.call(mock.ANY, tablename, indexname, schema=mock.ANY)],
            )
        else:
            eq_(
                generator.dialect.has_index.mock_calls,
                [],
            )

    def _assert_ddl(self, ddl_cls, elements, generator, argument):
        """Traverse ``argument`` and check each executed DDL element.

        Every execute() call must be one of ``ddl_cls`` and correspond to an
        entry in ``elements``; at the end no expected element may remain.
        """
        elements = list(elements)
        generator.traverse_single(argument)
        for call_ in generator.connection.execute.mock_calls:
            c = call_[1][0]
            assert isinstance(c, ddl_cls)
            assert c.element in elements, (
                "element %r was not expected" % c.element
            )
            elements.remove(c.element)
            if getattr(c, "include_foreign_key_constraints", None) is not None:
                # FKs created inline with the table are considered covered.
                elements[:] = [
                    e
                    for e in elements
                    if e not in set(c.include_foreign_key_constraints)
                ]
        assert not elements, "elements remain in list: %r" % elements
| EmitDDLTest |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 7585,
"end": 9661
class ToolSetCursor(ToolBase):
    """
    Change to the current cursor while inaxes.

    This tool, keeps track of all `ToolToggleBase` derived tools, and updates
    the cursor when a tool gets triggered.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._id_drag = None
        self._current_tool = None
        self._default_cursor = cursors.POINTER
        self._last_cursor = self._default_cursor
        self.toolmanager.toolmanager_connect('tool_added_event',
                                             self._add_tool_cbk)
        for tool in self.toolmanager.tools.values():  # process current tools
            self._add_tool_cbk(mpl.backend_managers.ToolEvent(
                'tool_added_event', self.toolmanager, tool))

    def set_figure(self, figure):
        # Reconnect the motion callback to the new figure's canvas.
        if self._id_drag:
            self.canvas.mpl_disconnect(self._id_drag)
        super().set_figure(figure)
        if figure:
            self._id_drag = self.canvas.mpl_connect(
                'motion_notify_event', self._set_cursor_cbk)

    def _add_tool_cbk(self, event):
        """Process every newly added tool."""
        if getattr(event.tool, 'cursor', None) is not None:
            self.toolmanager.toolmanager_connect(
                f'tool_trigger_{event.tool.name}', self._tool_trigger_cbk)

    def _tool_trigger_cbk(self, event):
        self._current_tool = event.tool if event.tool.toggled else None
        self._set_cursor_cbk(event.canvasevent)

    def _set_cursor_cbk(self, event):
        # Only push a new cursor to the canvas when it actually changed,
        # to avoid redundant backend calls on every motion event.
        if not event or not self.canvas:
            return
        if (self._current_tool and getattr(event, "inaxes", None)
                and event.inaxes.get_navigate()):
            if self._last_cursor != self._current_tool.cursor:
                self.canvas.set_cursor(self._current_tool.cursor)
                self._last_cursor = self._current_tool.cursor
        elif self._last_cursor != self._default_cursor:
            self.canvas.set_cursor(self._default_cursor)
            self._last_cursor = self._default_cursor
python | doocs__leetcode | solution/2800-2899/2894.Divisible and Non-divisible Sums Difference/Solution.py | {
"start": 0,
"end": 135
class Solution:
    """LeetCode 2894: Divisible and Non-divisible Sums Difference."""

    def differenceOfSums(self, n: int, m: int) -> int:
        """Return (sum of 1..n not divisible by m) - (sum divisible by m).

        Each i contributes +i when i % m != 0 and -i otherwise.
        """
        return sum(i if i % m else -i for i in range(1, n + 1))
| Solution |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 24783,
"end": 25366
class Getitem(Expr):
    """Get an attribute or item from an expression and prefer the item."""

    fields = ("node", "arg", "ctx")
    node: Expr
    arg: Expr
    ctx: str

    def as_const(self, eval_ctx: EvalContext | None = None) -> t.Any:
        # Constant folding only makes sense for load context.
        if self.ctx != "load":
            raise Impossible()

        eval_ctx = get_eval_context(self, eval_ctx)

        try:
            return eval_ctx.environment.getitem(
                self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
            )
        except Exception as e:
            # Any failure during folding means "not constant".
            raise Impossible() from e
python | pypa__setuptools | setuptools/_distutils/text_file.py | {
"start": 209,
"end": 12101
class TextFile:
    """Provides a file-like object that takes care of all the things you
    commonly want to do when processing a text file that has some
    line-by-line syntax: strip comments (as long as "#" is your
    comment character), skip blank lines, join adjacent lines by
    escaping the newline (ie. backslash at end of line), strip
    leading and/or trailing whitespace.  All of these are optional
    and independently controllable.

    Provides a 'warn()' method so you can generate warning messages that
    report physical line number, even if the logical line in question
    spans multiple physical lines.  Also provides 'unreadline()' for
    implementing line-at-a-time lookahead.

    Constructor is called as:

        TextFile (filename=None, file=None, **options)

    It bombs (RuntimeError) if both 'filename' and 'file' are None;
    'filename' should be a string, and 'file' a file object (or
    something that provides 'readline()' and 'close()' methods).  It is
    recommended that you supply at least 'filename', so that TextFile
    can include it in warning messages.  If 'file' is not supplied,
    TextFile creates its own using 'io.open()'.

    The options are all boolean, and affect the value returned by
    'readline()':
      strip_comments [default: true]
        strip from "#" to end-of-line, as well as any whitespace
        leading up to the "#" -- unless it is escaped by a backslash
      lstrip_ws [default: false]
        strip leading whitespace from each line before returning it
      rstrip_ws [default: true]
        strip trailing whitespace (including line terminator!) from
        each line before returning it
      skip_blanks [default: true}
        skip lines that are empty *after* stripping comments and
        whitespace.  (If both lstrip_ws and rstrip_ws are false,
        then some lines may consist of solely whitespace: these will
        *not* be skipped, even if 'skip_blanks' is true.)
      join_lines [default: false]
        if a backslash is the last non-newline character on a line
        after stripping comments and whitespace, join the following line
        to it to form one "logical line"; if N consecutive lines end
        with a backslash, then N+1 physical lines will be joined to
        form one logical line.
      collapse_join [default: false]
        strip leading whitespace from lines that are joined to their
        predecessor; only matters if (join_lines and not lstrip_ws)
      errors [default: 'strict']
        error handler used to decode the file content

    Note that since 'rstrip_ws' can strip the trailing newline, the
    semantics of 'readline()' must differ from those of the builtin file
    object's 'readline()' method!  In particular, 'readline()' returns
    None for end-of-file: an empty string might just be a blank line (or
    an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
    not."""

    default_options = {
        'strip_comments': 1,
        'skip_blanks': 1,
        'lstrip_ws': 0,
        'rstrip_ws': 1,
        'join_lines': 0,
        'collapse_join': 0,
        'errors': 'strict',
    }

    def __init__(self, filename=None, file=None, **options):
        """Construct a new TextFile object.  At least one of 'filename'
        (a string) and 'file' (a file-like object) must be supplied.
        They keyword argument options are described above and affect
        the values returned by 'readline()'."""
        if filename is None and file is None:
            raise RuntimeError(
                "you must supply either or both of 'filename' and 'file'"
            )

        # set values for all options -- either from client option hash
        # or fallback to default_options
        for opt in self.default_options.keys():
            if opt in options:
                setattr(self, opt, options[opt])
            else:
                setattr(self, opt, self.default_options[opt])

        # sanity check client option hash
        for opt in options.keys():
            if opt not in self.default_options:
                raise KeyError(f"invalid TextFile option '{opt}'")

        if file is None:
            self.open(filename)
        else:
            self.filename = filename
            self.file = file
            self.current_line = 0  # assuming that file is at BOF!

        # 'linebuf' is a stack of lines that will be emptied before we
        # actually read from the file; it's only populated by an
        # 'unreadline()' operation
        self.linebuf = []

    def open(self, filename):
        """Open a new file named 'filename'.  This overrides both the
        'filename' and 'file' arguments to the constructor."""
        self.filename = filename
        self.file = open(self.filename, errors=self.errors, encoding='utf-8')
        self.current_line = 0

    def close(self):
        """Close the current file and forget everything we know about it
        (filename, current line number)."""
        file = self.file
        self.file = None
        self.filename = None
        self.current_line = None
        file.close()

    def gen_error(self, msg, line=None):
        # Build "filename, line N: msg" (or "lines M-N" for joined lines).
        outmsg = []
        if line is None:
            line = self.current_line
        outmsg.append(self.filename + ", ")
        if isinstance(line, (list, tuple)):
            outmsg.append("lines {}-{}: ".format(*line))
        else:
            outmsg.append(f"line {int(line)}: ")
        outmsg.append(str(msg))
        return "".join(outmsg)

    def error(self, msg, line=None):
        raise ValueError("error: " + self.gen_error(msg, line))

    def warn(self, msg, line=None):
        """Print (to stderr) a warning message tied to the current logical
        line in the current file.  If the current logical line in the
        file spans multiple physical lines, the warning refers to the
        whole range, eg. "lines 3-5".  If 'line' supplied, it overrides
        the current line number; it may be a list or tuple to indicate a
        range of physical lines, or an integer for a single physical
        line."""
        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")

    def readline(self):  # noqa: C901
        """Read and return a single logical line from the current file (or
        from an internal buffer if lines have previously been "unread"
        with 'unreadline()').  If the 'join_lines' option is true, this
        may involve reading multiple physical lines concatenated into a
        single string.  Updates the current line number, so calling
        'warn()' after 'readline()' emits a warning about the physical
        line(s) just read.  Returns None on end-of-file, since the empty
        string can occur if 'rstrip_ws' is true but 'strip_blanks' is
        not."""
        # If any "unread" lines waiting in 'linebuf', return the top
        # one.  (We don't actually buffer read-ahead data -- lines only
        # get put in 'linebuf' if the client explicitly does an
        # 'unreadline()'.
        if self.linebuf:
            line = self.linebuf[-1]
            del self.linebuf[-1]
            return line

        buildup_line = ''

        while True:
            # read the line, make it None if EOF
            line = self.file.readline()
            if line == '':
                line = None

            if self.strip_comments and line:
                # Look for the first "#" in the line.  If none, never
                # mind.  If we find one and it's the first character, or
                # is not preceded by "\", then it starts a comment --
                # strip the comment, strip whitespace before it, and
                # carry on.  Otherwise, it's just an escaped "#", so
                # unescape it (and any other escaped "#"'s that might be
                # lurking in there) and otherwise leave the line alone.
                pos = line.find("#")
                if pos == -1:  # no "#" -- no comments
                    pass

                # It's definitely a comment -- either "#" is the first
                # character, or it's elsewhere and unescaped.
                elif pos == 0 or line[pos - 1] != "\\":
                    # Have to preserve the trailing newline, because it's
                    # the job of a later step (rstrip_ws) to remove it --
                    # and if rstrip_ws is false, we'd better preserve it!
                    # (NB. this means that if the final line is all comment
                    # and has no trailing newline, we will think that it's
                    # EOF; I think that's OK.)
                    eol = (line[-1] == '\n') and '\n' or ''
                    line = line[0:pos] + eol

                    # If all that's left is whitespace, then skip line
                    # *now*, before we try to join it to 'buildup_line' --
                    # that way constructs like
                    #   hello \\
                    #   # comment that should be ignored
                    #   there
                    # result in "hello there".
                    if line.strip() == "":
                        continue
                else:  # it's an escaped "#"
                    line = line.replace("\\#", "#")

            # did previous line end with a backslash? then accumulate
            if self.join_lines and buildup_line:
                # oops: end of file
                if line is None:
                    self.warn("continuation line immediately precedes end-of-file")
                    return buildup_line

                if self.collapse_join:
                    line = line.lstrip()
                line = buildup_line + line

                # careful: pay attention to line number when incrementing it
                if isinstance(self.current_line, list):
                    self.current_line[1] = self.current_line[1] + 1
                else:
                    self.current_line = [self.current_line, self.current_line + 1]
            # just an ordinary line, read it as usual
            else:
                if line is None:  # eof
                    return None

                # still have to be careful about incrementing the line number!
                if isinstance(self.current_line, list):
                    self.current_line = self.current_line[1] + 1
                else:
                    self.current_line = self.current_line + 1

            # strip whitespace however the client wants (leading and
            # trailing, or one or the other, or neither)
            if self.lstrip_ws and self.rstrip_ws:
                line = line.strip()
            elif self.lstrip_ws:
                line = line.lstrip()
            elif self.rstrip_ws:
                line = line.rstrip()

            # blank line (whether we rstrip'ed or not)? skip to next line
            # if appropriate
            if line in ('', '\n') and self.skip_blanks:
                continue

            if self.join_lines:
                if line[-1] == '\\':
                    buildup_line = line[:-1]
                    continue

                if line[-2:] == '\\\n':
                    buildup_line = line[0:-2] + '\n'
                    continue

            # well, I guess there's some actual content there: return it
            return line

    def readlines(self):
        """Read and return the list of all logical lines remaining in the
        current file."""
        lines = []
        while True:
            line = self.readline()
            if line is None:
                return lines
            lines.append(line)

    def unreadline(self, line):
        """Push 'line' (a string) onto an internal buffer that will be
        checked by future 'readline()' calls.  Handy for implementing
        a parser with line-at-a-time lookahead."""
        self.linebuf.append(line)
python | spack__spack | lib/spack/spack/vendor/archspec/cpu/schema.py | {
"start": 386,
"end": 3710
} | class ____(collections.abc.MutableMapping):
"""Lazy dictionary that gets constructed on first access to any object key
Args:
factory (callable): factory function to construct the dictionary
"""
def __init__(self, factory, *args, **kwargs):
self.factory = factory
self.args = args
self.kwargs = kwargs
self._data = None
@property
def data(self):
"""Returns the lazily constructed dictionary"""
if self._data is None:
self._data = self.factory(*self.args, **self.kwargs)
return self._data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
#: Environment variable that might point to a directory with a user defined JSON file
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR"
#: Environment variable that might point to a directory with extensions to JSON files
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR"
def _json_file(
filename: str, allow_custom: bool = False
) -> Tuple[pathlib.Path, Optional[pathlib.Path]]:
"""Given a filename, returns the absolute path for the main JSON file, and an
optional absolute path for an extension JSON file.
Args:
filename: filename for the JSON file
allow_custom: if True, allows overriding the location where the file resides
"""
json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
json_dir = json_dir.absolute()
json_file = json_dir / filename
extension_file = None
if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
extension_dir.absolute()
extension_file = extension_dir / filename
return json_file, extension_file
def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
with open(json_file, "r", encoding="utf-8") as file:
data = json.load(file)
if not extension_file or not extension_file.exists():
return data
with open(extension_file, "r", encoding="utf-8") as file:
extension_data = json.load(file)
top_level_sections = list(data.keys())
for key in top_level_sections:
if key not in extension_data:
continue
data[key].update(extension_data[key])
return data
#: In memory representation of the data in microarchitectures.json, loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True))
#: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json"))
#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
| LazyDictionary |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_spans_performance.py | {
"start": 61703,
"end": 65892
} | class ____(OrganizationEventsSpansEndpointTestBase):
URL = "sentry-api-0-organization-events-spans-stats"
def test_require_span_param(self) -> None:
response = self.client.get(
self.url,
data={"project": self.project.id},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"span": [ErrorDetail("This field is required.", code="required")]}
def test_bad_span_param(self) -> None:
response = self.client.get(
self.url,
data={"project": self.project.id, "span": ["http.server"]},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"span": [
ErrorDetail(
"span must consist of of a span op and a valid 16 character hex delimited by a colon (:)",
code="invalid",
)
]
}
response = self.client.get(
self.url,
data={"project": self.project.id, "span": ["http.server:foo"]},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"span": [
ErrorDetail(
"`spanGroup` must be a valid 16 character hex (containing only digits, or a-f characters)",
code="invalid",
)
]
}
@patch("sentry.api.endpoints.organization_events_spans_performance.raw_snql_query")
def test_one_span(self, mock_raw_snql_query: MagicMock) -> None:
mock_raw_snql_query.side_effect = [{"data": []}]
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": f"http.server:{'ab' * 8}",
"yAxis": [
"percentileArray(spans_exclusive_time, 0.75)",
"percentileArray(spans_exclusive_time, 0.95)",
"percentileArray(spans_exclusive_time, 0.99)",
],
"start": self.day_ago,
"end": self.day_ago + timedelta(hours=2),
"interval": "1h",
},
format="json",
)
assert response.status_code == 200, response.content
# ensure that the result is a proper time series
data = response.data
series_names = [
f"percentileArray(spans_exclusive_time, 0.{percentile})"
for percentile in ["75", "95", "99"]
]
assert set(data.keys()) == set(series_names)
for i, series_name in enumerate(series_names):
series = data[series_name]
assert series["order"] == i
assert [attrs for _, attrs in series["data"]] == [
[{"count": 0}],
[{"count": 0}],
]
assert mock_raw_snql_query.call_count == 1
query = mock_raw_snql_query.call_args_list[0][0][0].query
# ensure the specified y axes are in the select
for percentile in ["75", "95", "99"]:
assert (
Function(
f"quantile(0.{percentile.rstrip('0')})",
[Function("arrayJoin", [Column("spans.exclusive_time")])],
f"percentileArray_spans_exclusive_time_0_{percentile}",
)
in query.select
)
spans_op = Function("arrayJoin", [Column("spans.op")], "array_join_spans_op")
spans_group = Function("arrayJoin", [Column("spans.group")], "array_join_spans_group")
# ensure the two span columns are in the group by
for column in [spans_op, spans_group]:
assert column in query.groupby
# ensure there is a condition on the span
assert (
Condition(
Function("tuple", [spans_op, spans_group]),
Op.IN,
Function("tuple", [Function("tuple", ["http.server", "ab" * 8])]),
)
in query.where
)
| OrganizationEventsSpansStatsEndpointTest |
python | networkx__networkx | networkx/utils/tests/test_config.py | {
"start": 526,
"end": 8717
} | class ____(Config):
pass
@pytest.mark.parametrize("cfg", [EmptyConfig(), Config()])
def test_config_empty(cfg):
assert dir(cfg) == []
with pytest.raises(AttributeError):
cfg.x = 1
with pytest.raises(KeyError):
cfg["x"] = 1
with pytest.raises(AttributeError):
cfg.x
with pytest.raises(KeyError):
cfg["x"]
assert len(cfg) == 0
assert "x" not in cfg
assert cfg == cfg
assert cfg.get("x", 2) == 2
assert set(cfg.keys()) == set()
assert set(cfg.values()) == set()
assert set(cfg.items()) == set()
cfg2 = pickle.loads(pickle.dumps(cfg))
assert cfg == cfg2
assert isinstance(cfg, collections.abc.Collection)
assert isinstance(cfg, collections.abc.Mapping)
def test_config_subclass():
with pytest.raises(TypeError, match="missing 2 required keyword-only"):
ExampleConfig()
with pytest.raises(ValueError, match="x must be positive"):
ExampleConfig(x=0, y="foo")
with pytest.raises(TypeError, match="unexpected keyword"):
ExampleConfig(x=1, y="foo", z="bad config")
with pytest.raises(TypeError, match="unexpected keyword"):
EmptyConfig(z="bad config")
cfg = ExampleConfig(x=1, y="foo")
assert cfg.x == 1
assert cfg["x"] == 1
assert cfg["y"] == "foo"
assert cfg.y == "foo"
assert "x" in cfg
assert "y" in cfg
assert "z" not in cfg
assert len(cfg) == 2
assert set(iter(cfg)) == {"x", "y"}
assert set(cfg.keys()) == {"x", "y"}
assert set(cfg.values()) == {1, "foo"}
assert set(cfg.items()) == {("x", 1), ("y", "foo")}
assert dir(cfg) == ["x", "y"]
cfg.x = 2
cfg["y"] = "bar"
assert cfg["x"] == 2
assert cfg.y == "bar"
with pytest.raises(TypeError, match="can't be deleted"):
del cfg.x
with pytest.raises(TypeError, match="can't be deleted"):
del cfg["y"]
assert cfg.x == 2
assert cfg == cfg
assert cfg == ExampleConfig(x=2, y="bar")
assert cfg != ExampleConfig(x=3, y="baz")
assert cfg != Config(x=2, y="bar")
with pytest.raises(TypeError, match="y must be a str"):
cfg["y"] = 5
with pytest.raises(ValueError, match="x must be positive"):
cfg.x = -5
assert cfg.get("x", 10) == 2
with pytest.raises(AttributeError):
cfg.z = 5
with pytest.raises(KeyError):
cfg["z"] = 5
with pytest.raises(AttributeError):
cfg.z
with pytest.raises(KeyError):
cfg["z"]
cfg2 = pickle.loads(pickle.dumps(cfg))
assert cfg == cfg2
assert cfg.__doc__ == "Example configuration."
assert cfg2.__doc__ == "Example configuration."
def test_config_defaults():
class DefaultConfig(Config):
x: int = 0
y: int
cfg = DefaultConfig(y=1)
assert cfg.x == 0
cfg = DefaultConfig(x=2, y=1)
assert cfg.x == 2
def test_nxconfig():
assert isinstance(nx.config.backend_priority, BackendPriorities)
assert isinstance(nx.config.backend_priority.algos, list)
assert isinstance(nx.config.backends, Config)
with pytest.raises(TypeError, match="must be a list of backend names"):
nx.config.backend_priority.algos = "nx_loopback"
with pytest.raises(ValueError, match="Unknown backend when setting"):
nx.config.backend_priority.algos = ["this_almost_certainly_is_not_a_backend"]
with pytest.raises(TypeError, match="must be a Config of backend configs"):
nx.config.backends = {}
with pytest.raises(TypeError, match="must be a Config of backend configs"):
nx.config.backends = Config(plausible_backend_name={})
with pytest.raises(ValueError, match="Unknown backend when setting"):
nx.config.backends = Config(this_almost_certainly_is_not_a_backend=Config())
with pytest.raises(TypeError, match="must be True or False"):
nx.config.cache_converted_graphs = "bad value"
with pytest.raises(TypeError, match="must be a set of "):
nx.config.warnings_to_ignore = 7
with pytest.raises(ValueError, match="Unknown warning "):
nx.config.warnings_to_ignore = {"bad value"}
prev = nx.config.backend_priority
try:
nx.config.backend_priority = ["networkx"]
assert isinstance(nx.config.backend_priority, BackendPriorities)
assert nx.config.backend_priority.algos == ["networkx"]
finally:
nx.config.backend_priority = prev
def test_nxconfig_context():
# We do some special handling so that `nx.config.backend_priority = val`
# actually does `nx.config.backend_priority.algos = val`.
orig = nx.config.backend_priority.algos
val = [] if orig else ["networkx"]
assert orig != val
assert nx.config.backend_priority.algos != val
with nx.config(backend_priority=val):
assert nx.config.backend_priority.algos == val
assert nx.config.backend_priority.algos == orig
with nx.config.backend_priority(algos=val):
assert nx.config.backend_priority.algos == val
assert nx.config.backend_priority.algos == orig
bad = ["bad-backend"]
with pytest.raises(ValueError, match="Unknown backend"):
nx.config.backend_priority = bad
with pytest.raises(ValueError, match="Unknown backend"):
with nx.config(backend_priority=bad):
pass
with pytest.raises(ValueError, match="Unknown backend"):
with nx.config.backend_priority(algos=bad):
pass
def test_not_strict():
class FlexibleConfig(Config, strict=False):
x: int
cfg = FlexibleConfig(x=1)
assert "_strict" not in cfg
assert len(cfg) == 1
assert list(cfg) == ["x"]
assert list(cfg.keys()) == ["x"]
assert list(cfg.values()) == [1]
assert list(cfg.items()) == [("x", 1)]
assert cfg.x == 1
assert cfg["x"] == 1
assert "x" in cfg
assert hasattr(cfg, "x")
assert "FlexibleConfig(x=1)" in repr(cfg)
assert cfg == FlexibleConfig(x=1)
del cfg.x
assert "FlexibleConfig()" in repr(cfg)
assert len(cfg) == 0
assert not hasattr(cfg, "x")
assert "x" not in cfg
assert not hasattr(cfg, "y")
assert "y" not in cfg
cfg.y = 2
assert len(cfg) == 1
assert list(cfg) == ["y"]
assert list(cfg.keys()) == ["y"]
assert list(cfg.values()) == [2]
assert list(cfg.items()) == [("y", 2)]
assert cfg.y == 2
assert cfg["y"] == 2
assert hasattr(cfg, "y")
assert "y" in cfg
del cfg["y"]
assert len(cfg) == 0
assert list(cfg) == []
with pytest.raises(AttributeError, match="y"):
del cfg.y
with pytest.raises(KeyError, match="y"):
del cfg["y"]
with pytest.raises(TypeError, match="missing 1 required keyword-only"):
FlexibleConfig()
# Be strict when first creating the config object
with pytest.raises(TypeError, match="unexpected keyword argument 'y'"):
FlexibleConfig(x=1, y=2)
class FlexibleConfigWithDefault(Config, strict=False):
x: int = 0
assert FlexibleConfigWithDefault().x == 0
assert FlexibleConfigWithDefault(x=1)["x"] == 1
def test_context():
cfg = Config(x=1)
with cfg(x=2) as c:
assert c.x == 2
c.x = 3
assert cfg.x == 3
assert cfg.x == 1
with cfg(x=2) as c:
assert c == cfg
assert cfg.x == 2
with cfg(x=3) as c2:
assert c2 == cfg
assert cfg.x == 3
with pytest.raises(RuntimeError, match="context manager without"):
with cfg as c3: # Forgot to call `cfg(...)`
pass
assert cfg.x == 3
assert cfg.x == 2
assert cfg.x == 1
c = cfg(x=4) # Not yet as context (not recommended, but possible)
assert c == cfg
assert cfg.x == 4
# Cheat by looking at internal data; context stack should only grow with __enter__
assert cfg._prev is not None
assert cfg._context_stack == []
with c:
assert c == cfg
assert cfg.x == 4
assert cfg.x == 1
# Cheat again; there was no preceding `cfg(...)` call this time
assert cfg._prev is None
with pytest.raises(RuntimeError, match="context manager without"):
with cfg:
pass
assert cfg.x == 1
| EmptyConfig |
python | gevent__gevent | src/greentest/3.14/test_weakref.py | {
"start": 34067,
"end": 37431
} | class ____(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
gc_collect() # For PyPy or other GCs.
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertNotHasAttr(r, "__dict__")
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
| SubclassableWeakrefTestCase |
python | huggingface__transformers | src/transformers/models/gpt2/modeling_gpt2.py | {
"start": 25566,
"end": 34779
} | class ____(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.gradient_checkpointing = False
self._attn_implementation = config._attn_implementation
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# based on pattern from src/transformers/models/whisper/modeling_whisper.py::WhisperDecoder
if use_cache:
if past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if self.config.add_cross_attention and not isinstance(past_key_values, EncoderDecoderCache):
past_key_values = EncoderDecoderCache(past_key_values, DynamicCache(config=self.config))
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device)
# Attention mask.
# ._update_causal_mask() and ._prepare_4d_causal_attention_mask_with_cache_position() copied from LlamaModel
if attention_mask is not None and attention_mask.ndim < 4:
attention_mask = attention_mask.view(batch_size, -1)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
_use_sdpa = self._attn_implementation == "sdpa" and output_attentions is False
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if _use_sdpa:
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1]
)
elif self._attn_implementation != "flash_attention_2":
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
past_key_values if not (self.gradient_checkpointing and self.training) else None,
cache_position,
causal_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
**kwargs,
)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[2],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
past_key_values = past_key_values if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
@auto_docstring(
custom_intro="""
The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
| GPT2Model |
python | tensorflow__tensorflow | tensorflow/python/layers/utils_test.py | {
"start": 958,
"end": 3948
} | class ____(test.TestCase):
def testConvertDataFormat(self):
self.assertEqual('NCDHW', utils.convert_data_format('channels_first', 5))
self.assertEqual('NCHW', utils.convert_data_format('channels_first', 4))
self.assertEqual('NCW', utils.convert_data_format('channels_first', 3))
self.assertEqual('NHWC', utils.convert_data_format('channels_last', 4))
self.assertEqual('NWC', utils.convert_data_format('channels_last', 3))
self.assertEqual('NDHWC', utils.convert_data_format('channels_last', 5))
with self.assertRaises(ValueError):
utils.convert_data_format('invalid', 2)
def testNormalizeTuple(self):
self.assertEqual((2, 2, 2), utils.normalize_tuple(2, n=3, name='strides'))
self.assertEqual(
(2, 1, 2), utils.normalize_tuple((2, 1, 2), n=3, name='strides'))
with self.assertRaises(ValueError):
utils.normalize_tuple((2, 1), n=3, name='strides')
with self.assertRaises(ValueError):
utils.normalize_tuple(None, n=3, name='strides')
def testNormalizeDataFormat(self):
self.assertEqual(
'channels_last', utils.normalize_data_format('Channels_Last'))
self.assertEqual(
'channels_first', utils.normalize_data_format('CHANNELS_FIRST'))
with self.assertRaises(ValueError):
utils.normalize_data_format('invalid')
def testNormalizePadding(self):
self.assertEqual('same', utils.normalize_padding('SAME'))
self.assertEqual('valid', utils.normalize_padding('VALID'))
with self.assertRaises(ValueError):
utils.normalize_padding('invalid')
def testConvOutputLength(self):
self.assertEqual(4, utils.conv_output_length(4, 2, 'same', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'same', 2, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'valid', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'valid', 2, 1))
self.assertEqual(5, utils.conv_output_length(4, 2, 'full', 1, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'full', 2, 1))
self.assertEqual(2, utils.conv_output_length(5, 2, 'valid', 2, 2))
def testConvInputLength(self):
self.assertEqual(3, utils.conv_input_length(4, 2, 'same', 1))
self.assertEqual(2, utils.conv_input_length(2, 2, 'same', 2))
self.assertEqual(4, utils.conv_input_length(3, 2, 'valid', 1))
self.assertEqual(4, utils.conv_input_length(2, 2, 'valid', 2))
self.assertEqual(3, utils.conv_input_length(4, 2, 'full', 1))
self.assertEqual(4, utils.conv_input_length(3, 2, 'full', 2))
def testDeconvOutputLength(self):
self.assertEqual(4, utils.deconv_output_length(4, 2, 'same', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'same', 2))
self.assertEqual(5, utils.deconv_output_length(4, 2, 'valid', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'valid', 2))
self.assertEqual(3, utils.deconv_output_length(4, 2, 'full', 1))
self.assertEqual(6, utils.deconv_output_length(4, 2, 'full', 2))
| ConvUtilsTest |
python | doocs__leetcode | solution/1300-1399/1330.Reverse Subarray To Maximize Array Value/Solution.py | {
"start": 0,
"end": 626
} | class ____:
def maxValueAfterReverse(self, nums: List[int]) -> int:
ans = s = sum(abs(x - y) for x, y in pairwise(nums))
for x, y in pairwise(nums):
ans = max(ans, s + abs(nums[0] - y) - abs(x - y))
ans = max(ans, s + abs(nums[-1] - x) - abs(x - y))
for k1, k2 in pairwise((1, -1, -1, 1, 1)):
mx, mi = -inf, inf
for x, y in pairwise(nums):
a = k1 * x + k2 * y
b = abs(x - y)
mx = max(mx, a - b)
mi = min(mi, a + b)
ans = max(ans, s + max(mx - mi, 0))
return ans
| Solution |
python | walkccc__LeetCode | solutions/53. Maximum Subarray/53-2.py | {
"start": 0,
"end": 197
} | class ____:
def maxSubArray(self, nums: list[int]) -> int:
ans = -math.inf
summ = 0
for num in nums:
summ = max(num, summ + num)
ans = max(ans, summ)
return ans
| Solution |
python | bokeh__bokeh | tests/unit/bokeh/server/test_auth_provider.py | {
"start": 9593,
"end": 10005
} | class ____(RequestHandler): pass
""", func, suffix='.py')
def test_login_handler_wrong_type(self) -> None:
def func(filename: str):
with pytest.raises(ValueError) as e:
bsa.AuthModule(filename)
assert str(e) == "LoginHandler must be a Tornado RequestHandler"
with_file_contents("""
def get_user(handler): return 10
login_url = "/foo"
| LoginHandler |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 28471,
"end": 28696
} | class ____(_ReturnsStringKey, RoleImpl):
__slots__ = ()
def _post_coercion(self, element, *, as_key=False, **kw):
if as_key:
return element.key
else:
return element
| DMLColumnImpl |
python | huggingface__transformers | tests/models/mgp_str/test_modeling_mgp_str.py | {
"start": 1317,
"end": 4108
} | class ____:
def __init__(
self,
parent,
is_training=False,
batch_size=13,
image_size=(32, 128),
patch_size=4,
num_channels=3,
max_token_length=27,
num_character_labels=38,
num_bpe_labels=99,
num_wordpiece_labels=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
mlp_ratio=4.0,
patch_embeds_hidden_size=257,
output_hidden_states=None,
):
self.parent = parent
self.is_training = is_training
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.max_token_length = max_token_length
self.num_character_labels = num_character_labels
self.num_bpe_labels = num_bpe_labels
self.num_wordpiece_labels = num_wordpiece_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.patch_embeds_hidden_size = patch_embeds_hidden_size
self.output_hidden_states = output_hidden_states
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
config = self.get_config()
return config, pixel_values
def get_config(self):
return MgpstrConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
max_token_length=self.max_token_length,
num_character_labels=self.num_character_labels,
num_bpe_labels=self.num_bpe_labels,
num_wordpiece_labels=self.num_wordpiece_labels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
mlp_ratio=self.mlp_ratio,
output_hidden_states=self.output_hidden_states,
)
def create_and_check_model(self, config, pixel_values):
model = MgpstrForSceneTextRecognition(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
generated_ids = model(pixel_values)
self.parent.assertEqual(
generated_ids[0][0].shape, (self.batch_size, self.max_token_length, self.num_character_labels)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| MgpstrModelTester |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_dms.py | {
"start": 9166,
"end": 14008
} | class ____:
FILTER = {"Name": "replication-task-arn", "Values": [TASK_ARN]}
MOCK_DATA = {
"replication_task_id": "test_task",
"source_endpoint_arn": "source-endpoint-arn",
"target_endpoint_arn": "target-endpoint-arn",
"replication_instance_arn": "replication-instance-arn",
"migration_type": "full-load",
"table_mappings": {},
}
MOCK_RESPONSE = [
{
"ReplicationTaskIdentifier": MOCK_DATA["replication_task_id"],
"SourceEndpointArn": MOCK_DATA["source_endpoint_arn"],
"TargetEndpointArn": MOCK_DATA["target_endpoint_arn"],
"ReplicationInstanceArn": MOCK_DATA["replication_instance_arn"],
"MigrationType": MOCK_DATA["migration_type"],
"TableMappings": json.dumps(MOCK_DATA["table_mappings"]),
"ReplicationTaskArn": TASK_ARN,
"Status": "creating",
}
]
def setup_method(self):
args = {
"owner": "airflow",
"start_date": pendulum.datetime(2018, 1, 1, tz="UTC"),
}
self.dag = DAG("dms_describe_tasks_operator", default_args=args, schedule="@once")
def test_init(self):
op = DmsDescribeTasksOperator(
task_id="describe_tasks",
describe_tasks_kwargs={"Filters": [self.FILTER]},
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="eu-west-2",
verify="/foo/bar/spam.egg",
botocore_config={"read_timeout": 42},
)
assert op.describe_tasks_kwargs == {"Filters": [self.FILTER]}
assert op.hook.client_type == "dms"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "eu-west-2"
assert op.hook._verify == "/foo/bar/spam.egg"
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = DmsDescribeTasksOperator(
task_id="describe_tasks", describe_tasks_kwargs={"Filters": [self.FILTER]}
)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch.object(DmsHook, "describe_replication_tasks", return_value=(None, MOCK_RESPONSE))
@mock.patch.object(DmsHook, "get_conn")
def test_describe_tasks(self, mock_conn, mock_describe_replication_tasks):
describe_tasks_kwargs = {"Filters": [self.FILTER]}
describe_task = DmsDescribeTasksOperator(
task_id="describe_tasks", describe_tasks_kwargs=describe_tasks_kwargs
)
describe_task.execute(None)
mock_describe_replication_tasks.assert_called_once_with(**describe_tasks_kwargs)
@pytest.mark.db_test
@mock.patch.object(DmsHook, "describe_replication_tasks", return_value=(None, MOCK_RESPONSE))
@mock.patch.object(DmsHook, "get_conn")
def test_describe_tasks_return_value(
self,
mock_conn,
mock_describe_replication_tasks,
session,
clean_dags_dagruns_and_dagbundles,
testing_dag_bundle,
):
describe_task = DmsDescribeTasksOperator(
task_id="describe_tasks", dag=self.dag, describe_tasks_kwargs={"Filters": [self.FILTER]}
)
if AIRFLOW_V_3_0_PLUS:
sync_dag_to_db(self.dag)
dag_version = DagVersion.get_latest_version(self.dag.dag_id)
ti = TaskInstance(task=describe_task, dag_version_id=dag_version.id)
dag_run = DagRun(
dag_id=self.dag.dag_id,
logical_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
else:
dag_run = DagRun(
dag_id=self.dag.dag_id,
execution_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
ti = TaskInstance(task=describe_task)
ti.dag_run = dag_run
session.add(ti)
session.commit()
marker, response = describe_task.execute(ti.get_template_context())
assert marker is None
assert response == self.MOCK_RESPONSE
def test_template_fields(self):
op = DmsDescribeTasksOperator(
task_id="describe_tasks",
describe_tasks_kwargs={"Filters": [self.FILTER]},
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="eu-west-2",
verify="/foo/bar/spam.egg",
botocore_config={"read_timeout": 42},
)
validate_template_fields(op)
| TestDmsDescribeTasksOperator |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/batch_test.py | {
"start": 17813,
"end": 20777
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
dataset_range=[100],
batch_size=[2, 7])))
def testBatch(
self, dataset_range: int, batch_size: int):
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(dataset)
dataset = dataset.unbatch()
expected = list(range(0, (dataset_range // batch_size) * batch_size))
dataset_output = self.getDatasetOutput(
dataset, requires_initialization=True)
self.assertCountEqual(dataset_output, expected)
self.assertNotEqual(dataset_output, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
dataset_range=[100],
batch_size=[2, 7],
reshuffle=[True, False],
seed=[None, 42])))
def testReshuffleRepeatEpochs(
self,
dataset_range: int,
batch_size: int,
reshuffle: bool,
seed: Optional[int]):
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle)
dataset = dataset.repeat(2)
dataset = dataset.unbatch()
expected = list(range(0, (dataset_range // batch_size) * batch_size))
len_per_iteration = len(expected)
expected *= 2
output = self.getDatasetOutput(dataset, requires_initialization=True)
self.assertCountEqual(output, expected)
output_per_iteration = [
output[i : i + len_per_iteration]
for i in range(0, len(output), len_per_iteration)]
if reshuffle:
self.assertNotEqual(output_per_iteration[0], output_per_iteration[1])
else:
self.assertEqual(output_per_iteration[0], output_per_iteration[1])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
dataset_range=[100],
batch_size=[2, 7])))
def testNoDropRemainder(
self, dataset_range: int, batch_size: int):
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"does not support global shuffling with `drop_remainder=False`."):
dataset = global_shuffle_op._global_shuffle(dataset)
self.getDatasetOutput(dataset, requires_initialization=True)
| BatchGlobalShuffleTest |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py | {
"start": 5452,
"end": 5904
} | class ____(BaseConfig):
extension: str
bypass_reason: Optional[str] = Field(description="Reason why this type is considered unsupported.")
@validator("extension", always=True)
def extension_properly_formatted(cls, extension: str) -> str:
if not extension.startswith(".") or len(extension) < 2:
raise ValueError("Please provide a valid file extension (e.g. '.csv').")
return extension
| UnsupportedFileTypeConfig |
python | jmcnamara__XlsxWriter | xlsxwriter/chartsheet.py | {
"start": 405,
"end": 5653
} | class ____(worksheet.Worksheet):
"""
A class for writing the Excel XLSX Chartsheet file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.is_chartsheet = True
self.drawing = None
self.chart = None
self.charts = []
self.zoom_scale_normal = False
self.orientation = 0
self.protection = False
def set_chart(self, chart: Chart) -> Chart:
"""
Set the chart object for the chartsheet.
Args:
chart: Chart object.
Returns:
chart: A reference to the chart object.
"""
chart.embedded = False
chart.protection = self.protection
self.chart = chart
self.charts.append([0, 0, chart, 0, 0, 1, 1])
return chart
def protect(
self, password: str = "", options: Optional[Dict[str, Any]] = None
) -> None:
"""
Set the password and protection options of the worksheet.
Args:
password: An optional password string.
options: A dictionary of worksheet objects to protect.
Returns:
Nothing.
"""
# This method is overridden from parent worksheet class.
# Chartsheets only allow a reduced set of protect options.
copy = {}
if not options:
options = {}
if options.get("objects") is None:
copy["objects"] = False
else:
# Objects are default on for chartsheets, so reverse state.
copy["objects"] = not options["objects"]
if options.get("content") is None:
copy["content"] = True
else:
copy["content"] = options["content"]
copy["sheet"] = False
copy["scenarios"] = True
# If objects and content are both off then the chartsheet isn't
# protected, unless it has a password.
if password == "" and copy["objects"] and not copy["content"]:
return
if self.chart:
self.chart.protection = True
else:
self.protection = True
# Call the parent method.
super().protect(password, copy)
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the root worksheet element.
self._write_chartsheet()
# Write the worksheet properties.
self._write_sheet_pr()
# Write the sheet view properties.
self._write_sheet_views()
# Write the sheetProtection element.
self._write_sheet_protection()
# Write the printOptions element.
self._write_print_options()
# Write the worksheet page_margins.
self._write_page_margins()
# Write the worksheet page setup.
self._write_page_setup()
# Write the headerFooter element.
self._write_header_footer()
# Write the drawing element.
self._write_drawings()
# Write the legacyDrawingHF element.
self._write_legacy_drawing_hf()
# Close the worksheet tag.
self._xml_end_tag("chartsheet")
# Close the file.
self._xml_close()
def _prepare_chart(self, index, chart_id, drawing_id) -> None:
# Set up chart/drawings.
self.chart.id = chart_id - 1
self.drawing = Drawing()
self.drawing.orientation = self.orientation
self.external_drawing_links.append(
["/drawing", "../drawings/drawing" + str(drawing_id) + ".xml"]
)
self.drawing_links.append(
["/chart", "../charts/chart" + str(chart_id) + ".xml"]
)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_chartsheet(self) -> None:
# Write the <worksheet> element. This is the root element.
schema = "http://schemas.openxmlformats.org/"
xmlns = schema + "spreadsheetml/2006/main"
xmlns_r = schema + "officeDocument/2006/relationships"
attributes = [("xmlns", xmlns), ("xmlns:r", xmlns_r)]
self._xml_start_tag("chartsheet", attributes)
def _write_sheet_pr(self) -> None:
# Write the <sheetPr> element for Sheet level properties.
attributes = []
if self.filter_on:
attributes.append(("filterMode", 1))
if self.fit_page or self.tab_color:
self._xml_start_tag("sheetPr", attributes)
self._write_tab_color()
self._write_page_set_up_pr()
self._xml_end_tag("sheetPr")
else:
self._xml_empty_tag("sheetPr", attributes)
| Chartsheet |
python | catalyst-team__catalyst | catalyst/callbacks/metrics/segmentation.py | {
"start": 214,
"end": 4129
} | class ____(BatchMetricCallback):
"""IOU metric callback.
Args:
input_key: input key to use for metric calculation, specifies our `y_pred`
target_key: output key to use for metric calculation, specifies our `y_true`
class_dim: indicates class dimension (K) for ``outputs`` and
``targets`` tensors (default = 1)
weights: class weights
class_names: class names
threshold: threshold for outputs binarization
log_on_batch: boolean flag to log computed metrics every batch
compute_per_class_metrics: boolean flag to compute per-class metrics
(default: SETTINGS.compute_per_class_metrics or False).
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.data import ToTensor
from catalyst.contrib import MNIST, IoULoss
model = nn.Sequential(
nn.Conv2d(1, 1, 3, 1, 1), nn.ReLU(),
nn.Conv2d(1, 1, 3, 1, 1), nn.Sigmoid(),
)
criterion = IoULoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()),
batch_size=32
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False),
batch_size=32
),
}
class CustomRunner(dl.SupervisedRunner):
def handle_batch(self, batch):
x = batch[self._input_key]
x_noise = (x + torch.rand_like(x)).clamp_(0, 1)
x_ = self.model(x_noise)
self.batch = {
self._input_key: x, self._output_key: x_, self._target_key: x
}
runner = CustomRunner(
input_key="features",
output_key="scores",
target_key="targets",
loss_key="loss"
)
# model training
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
callbacks=[
dl.IOUCallback(input_key="scores", target_key="targets"),
dl.DiceCallback(input_key="scores", target_key="targets"),
dl.TrevskyCallback(input_key="scores", target_key="targets", alpha=0.2),
],
logdir="./logdir",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
verbose=True,
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
class_dim: int = 1,
weights: Optional[List[float]] = None,
class_names: Optional[List[str]] = None,
threshold: Optional[float] = None,
log_on_batch: bool = True,
compute_per_class_metrics: bool = SETTINGS.compute_per_class_metrics,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=IOUMetric(
class_dim=class_dim,
weights=weights,
class_names=class_names,
threshold=threshold,
compute_per_class_metrics=compute_per_class_metrics,
prefix=prefix,
suffix=suffix,
),
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
| IOUCallback |
python | django__django | tests/extra_regress/models.py | {
"start": 104,
"end": 746
} | class ____(models.Model):
base = models.ForeignKey("self", models.SET_NULL, null=True)
title = models.CharField(blank=True, max_length=255)
when = models.DateTimeField(default=datetime.datetime.now)
def save(self, *args, force_insert=False, force_update=False, **kwargs):
super().save(
*args, force_insert=force_insert, force_update=force_update, **kwargs
)
if not self.base:
self.base = self
super().save(*args, **kwargs)
def new_revision(self):
new_revision = copy.copy(self)
new_revision.pk = None
return new_revision
| RevisionableModel |
python | RaRe-Technologies__gensim | gensim/test/test_matutils.py | {
"start": 4924,
"end": 10561
} | class ____(unittest.TestCase):
# test unitvec
def test_sparse_npfloat32(self):
input_vector = sparse.csr_matrix(np.asarray([[1, 0, 0, 0, 3], [0, 0, 4, 3, 0]])).astype(np.float32)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector.data, man_unit_vector.data, atol=1e-3))
self.assertEqual(input_vector.dtype, unit_vector.dtype)
def test_sparse_npfloat64(self):
input_vector = sparse.csr_matrix(np.asarray([[1, 0, 0, 0, 3], [0, 0, 4, 3, 0]])).astype(np.float64)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector.data, man_unit_vector.data, atol=1e-3))
self.assertEqual(input_vector.dtype, unit_vector.dtype)
def test_sparse_npint32(self):
input_vector = sparse.csr_matrix(np.asarray([[1, 0, 0, 0, 3], [0, 0, 4, 3, 0]])).astype(np.int32)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector.data, man_unit_vector.data, atol=1e-3))
self.assertTrue(np.issubdtype(unit_vector.dtype, np.floating))
def test_sparse_npint64(self):
input_vector = sparse.csr_matrix(np.asarray([[1, 0, 0, 0, 3], [0, 0, 4, 3, 0]])).astype(np.int64)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector.data, man_unit_vector.data, atol=1e-3))
self.assertTrue(np.issubdtype(unit_vector.dtype, np.floating))
def test_dense_npfloat32(self):
input_vector = np.random.uniform(size=(5,)).astype(np.float32)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector, man_unit_vector))
self.assertEqual(input_vector.dtype, unit_vector.dtype)
def test_dense_npfloat64(self):
input_vector = np.random.uniform(size=(5,)).astype(np.float64)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector, man_unit_vector))
self.assertEqual(input_vector.dtype, unit_vector.dtype)
def test_dense_npint32(self):
input_vector = np.random.randint(10, size=5).astype(np.int32)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector, man_unit_vector))
self.assertTrue(np.issubdtype(unit_vector.dtype, np.floating))
def test_dense_npint64(self):
input_vector = np.random.randint(10, size=5).astype(np.int32)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector, man_unit_vector))
self.assertTrue(np.issubdtype(unit_vector.dtype, np.floating))
def test_sparse_python_float(self):
input_vector = sparse.csr_matrix(np.asarray([[1, 0, 0, 0, 3], [0, 0, 4, 3, 0]])).astype(float)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector.data, man_unit_vector.data, atol=1e-3))
self.assertEqual(input_vector.dtype, unit_vector.dtype)
def test_sparse_python_int(self):
input_vector = sparse.csr_matrix(np.asarray([[1, 0, 0, 0, 3], [0, 0, 4, 3, 0]])).astype(int)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector.data, man_unit_vector.data, atol=1e-3))
self.assertTrue(np.issubdtype(unit_vector.dtype, np.floating))
def test_dense_python_float(self):
input_vector = np.random.uniform(size=(5,)).astype(float)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector, man_unit_vector))
self.assertEqual(input_vector.dtype, unit_vector.dtype)
def test_dense_python_int(self):
input_vector = np.random.randint(10, size=5).astype(int)
unit_vector = matutils.unitvec(input_vector)
man_unit_vector = manual_unitvec(input_vector)
self.assertTrue(np.allclose(unit_vector, man_unit_vector))
self.assertTrue(np.issubdtype(unit_vector.dtype, np.floating))
def test_return_norm_zero_vector_scipy_sparse(self):
input_vector = sparse.csr_matrix([[]], dtype=np.int32)
return_value = matutils.unitvec(input_vector, return_norm=True)
self.assertTrue(isinstance(return_value, tuple))
norm = return_value[1]
self.assertTrue(isinstance(norm, float))
self.assertEqual(norm, 1.0)
def test_return_norm_zero_vector_numpy(self):
input_vector = np.array([], dtype=np.int32)
return_value = matutils.unitvec(input_vector, return_norm=True)
self.assertTrue(isinstance(return_value, tuple))
norm = return_value[1]
self.assertTrue(isinstance(norm, float))
self.assertEqual(norm, 1.0)
def test_return_norm_zero_vector_gensim_sparse(self):
input_vector = []
return_value = matutils.unitvec(input_vector, return_norm=True)
self.assertTrue(isinstance(return_value, tuple))
norm = return_value[1]
self.assertTrue(isinstance(norm, float))
self.assertEqual(norm, 1.0)
| UnitvecTestCase |
python | pytorch__pytorch | test/dynamo/test_package.py | {
"start": 1006,
"end": 23980
} | class ____(torch._inductor.test_case.TestCase):
def path(self):
path = os.path.join(cache_dir(), f"package_{self.id()}")
os.makedirs(path, exist_ok=True)
return path
def setUp(self):
super().setUp()
torch._dynamo.reset()
torch._dynamo.utils.counters.clear()
DynamoCache.clear()
PrecompileContext.clear()
def _save_and_reload(self, expected_backends, expected_dynamo):
"""
Serializes all artifacts, clears all caches, then reloads the serialized artifact
Simulates a new process.
Args:
expected_backends: Expected number of precompile_aot_autograd_artifacts
expected_dynamo: Expected number of precompile_dynamo_artifacts
"""
debug_info = PrecompileContext.save_to_dynamo_cache()
self.assertEqual(len(debug_info["dynamo"]), expected_dynamo)
self.assertEqual(len(debug_info["backends"]), expected_backends)
torch._dynamo.reset()
PrecompileContext.clear()
@unittest.expectedFailure # FUNCTION_MATCH guard not serializable today
def test_nn_module(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10, device="cuda")
def forward(self, x):
return self.linear(x)
fn = MyModule()
package = CompilePackage(fn.forward)
compiled_fn = torch._dynamo.optimize("inductor", package=package)(fn)
x = torch.randn(10, 10, device="cuda")
compiled_fn(x)
@parametrize("backend", ("eager", "inductor"))
@parametrize("device", ("cpu", "cuda", "xpu"))
def test_basic_fn(self, backend, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
ctx = DiskDynamoStore()
def fn(x):
return x + 1
args = (
torch.randn(
3,
2,
device=device,
),
)
# Saving
package = CompilePackage(fn)
compiled_fn = torch._dynamo.optimize(backend, package=package)(fn)
expected = compiled_fn(*args)
if backend == "eager":
for backend_id, backend in package.cached_backends.items():
ctx.record_eager_backend(backend_id, backend)
ctx.save_package(package, self.path())
# Loading
torch._dynamo.reset()
with torch.compiler.set_stance("fail_on_recompile"):
with self.assertRaisesRegex(
RuntimeError,
"Detected recompile when torch.compile stance is 'fail_on_recompile'",
):
compiled_fn(*args)
package, backends = ctx.load_package(fn, self.path())
compiled_fn = torch._dynamo.optimize(package=package)(fn)
package.install(backends)
self.assertEqual(expected, compiled_fn(*args))
@parametrize("backend", ("eager", "inductor"))
@parametrize("device", ("cpu", "cuda", "xpu"))
def test_lazy_backward(self, backend, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
ctx = DiskDynamoStore()
def fn(x):
return x.sin() + x.cos()
args = (
torch.zeros(
3,
2,
device=device,
requires_grad=True,
),
)
# Saving
package = CompilePackage(fn)
compiled_fn = torch._dynamo.optimize(backend, package=package)(fn)
expected = compiled_fn(*args)
expected.sum().backward()
if backend == "eager":
for backend_id, backend in package.cached_backends.items():
ctx.record_eager_backend(backend_id, backend)
ctx.save_package(package, self.path())
# Loading
torch._dynamo.reset()
with torch.compiler.set_stance("fail_on_recompile"):
with self.assertRaisesRegex(
RuntimeError,
"Detected recompile when torch.compile stance is 'fail_on_recompile'",
):
compiled_fn(*args)
package, backends = ctx.load_package(fn, self.path())
compiled_fn = torch._dynamo.optimize(package=package)(fn)
package.install(backends)
self.assertEqual(expected, compiled_fn(*args))
@parametrize("backend", ("eager", "inductor"))
@parametrize("device", ("cpu", "cuda", "xpu"))
def test_graph_break_bomb(self, backend, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
ctx = DiskDynamoStore()
def fn(x, l, r):
if l > r:
return x.sum()
mid = (l + r) // 2
if x.sum() == mid:
return x.sum()
elif x.sum() < mid:
return fn(x, l, mid)
else:
return fn(x, mid + 1, r)
def guard_filter_fn(guards):
return [
guard.guard_type not in ("CLOSURE_MATCH", "FUNCTION_MATCH")
for guard in guards
]
# Saving
package = CompilePackage(fn)
compiled_fn = torch._dynamo.optimize(
backend=backend, package=package, guard_filter_fn=guard_filter_fn
)(fn)
N = 10
args_list = [(torch.tensor(x, device=device), 0, N - 1) for x in range(N)]
for args in args_list:
compiled_fn(*args)
if backend == "eager":
for backend_id, backend in package.cached_backends.items():
ctx.record_eager_backend(backend_id, backend)
ctx.save_package(package, self.path())
# Loading
torch._dynamo.reset()
with torch.compiler.set_stance("fail_on_recompile"):
for args in args_list:
with self.assertRaisesRegex(
RuntimeError,
"Detected recompile when torch.compile stance is 'fail_on_recompile'",
):
compiled_fn(*args)
package, backends = ctx.load_package(fn, self.path())
compiled_fn = torch._dynamo.optimize(
backend="eager", package=package, guard_filter_fn=guard_filter_fn
)(fn)
package.install(backends)
for args in args_list:
self.assertEqual(compiled_fn(*args), args[0].sum())
with self.assertRaisesRegex(
RuntimeError,
"Detected recompile when torch.compile stance is 'fail_on_recompile'",
):
compiled_fn(torch.tensor(N), 0, N - 1)
@parametrize("backend", ("eager", "inductor"))
@parametrize("device", ("cpu", "cuda", "xpu"))
def test_dynamic_shape(self, backend, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
ctx = DiskDynamoStore()
def fn(x):
return x + x.shape[0]
args = (torch.randn(3, 2, device=device),)
args1 = (torch.randn(5, 2, device=device),)
args2 = (torch.randn(7, 2, device=device),)
expected1 = fn(*args1)
torch._dynamo.mark_dynamic(args[0], 0, min=3, max=5)
# Saving
package = CompilePackage(fn)
compiled_fn = torch._dynamo.optimize(backend=backend, package=package)(fn)
compiled_fn(*args)
if backend == "eager":
for backend_id, backend in package.cached_backends.items():
ctx.record_eager_backend(backend_id, backend)
ctx.save_package(package, self.path())
# Loading
torch._dynamo.reset()
with torch.compiler.set_stance("fail_on_recompile"):
with self.assertRaisesRegex(
RuntimeError,
"Detected recompile when torch.compile stance is 'fail_on_recompile'",
):
compiled_fn(*args1)
package, backends = ctx.load_package(fn, self.path())
compiled_fn = torch._dynamo.optimize(package=package)(fn)
package.install(backends)
self.assertEqual(expected1, compiled_fn(*args1))
with self.assertRaisesRegex(
RuntimeError,
"Detected recompile when torch.compile stance is 'fail_on_recompile'",
):
compiled_fn(*args2)
def test_file_change(self):
ctx = DiskDynamoStore()
def import_from_path(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
mock_module_add_original = """
def add(x, y):
return x + y
"""
mock_module_add_modified = """
def add(x, y):
return x - y
"""
with tempfile.TemporaryDirectory() as tmp_dir:
mock_module_add_original_path = os.path.join(
tmp_dir, "mock_module_add_original.py"
)
mock_module_add_modified_path = os.path.join(
tmp_dir, "mock_module_add_modified.py"
)
with open(mock_module_add_original_path, "w") as f:
f.write(mock_module_add_original)
with open(mock_module_add_modified_path, "w") as f:
f.write(mock_module_add_modified)
module = import_from_path(
"torch.test_package_helper",
mock_module_add_original_path,
)
def fn(x):
return module.add(x, 1)
args = (torch.randn(3, 2),)
def guard_filter_fn(guards):
return [
guard.guard_type
not in ("CLOSURE_MATCH", "FUNCTION_MATCH", "MODULE_MATCH")
for guard in guards
]
# Saving
package = CompilePackage(fn)
compiled_fn = torch._dynamo.optimize(
backend="eager", package=package, guard_filter_fn=guard_filter_fn
)(fn)
compiled_fn(*args)
for backend_id, backend in package.cached_backends.items():
ctx.record_eager_backend(backend_id, backend)
ctx.save_package(package, self.path())
module = import_from_path(
"torch.test_package_helper",
mock_module_add_modified_path,
)
with self.assertRaisesRegex(RuntimeError, "Source code changes detected"):
ctx.load_package(fn, self.path())
module = import_from_path(
"torch.test_package_helper",
mock_module_add_original_path,
)
ctx.load_package(fn, self.path())
@parametrize("device", ("cpu", "cuda", "xpu"))
def test_dynamo_cache_manual_load(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def fn(x):
return x.sin() + x.cos()
def fn2(x):
return x.cos() + x
package1 = CompilePackage(fn)
package2 = CompilePackage(fn2)
compiled_fn1 = torch._dynamo.optimize(backend="inductor", package=package1)(fn)
compiled_fn2 = torch._dynamo.optimize(backend="inductor", package=package2)(fn2)
arg1 = torch.randn(3, 2, device=device)
arg2 = torch.randn(5, 2, device=device)
expected = [compiled_fn1(arg1), compiled_fn2(arg2)]
DynamoCache.save(package1)
DynamoCache.save(package2)
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
self._save_and_reload(expected_backends=2, expected_dynamo=2)
# These should exist because of populate_caches
package1 = DynamoCache.load_and_install_package(fn)
package2 = DynamoCache.load_and_install_package(fn2)
with torch.compiler.set_stance("fail_on_recompile"):
result1 = compiled_fn1(arg1)
result2 = compiled_fn2(arg2)
self.assertEqual(expected, [result1, result2])
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_automatic_dynamo_serialize(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def fn(x):
return x.sin() + x.cos()
def fn2(x):
return x.cos() + x
arg1 = torch.randn(3, 2, device=device)
arg2 = torch.randn(5, 2, device=device)
expected = [fn(arg1), fn2(arg2)]
compiled_fn1 = torch.compile(fn)
compiled_fn2 = torch.compile(fn2)
result = [compiled_fn1(arg1), compiled_fn2(arg2)]
self.assertEqual(expected, result)
DynamoCache.clear()
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
self._save_and_reload(expected_backends=2, expected_dynamo=2)
compiled_fn1 = torch.compile(fn)
compiled_fn2 = torch.compile(fn2)
with torch.compiler.set_stance("fail_on_recompile"):
result1 = compiled_fn1(arg1)
result2 = compiled_fn2(arg2)
self.assertEqual(expected, [result1, result2])
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_automatic_dynamo_recompiles(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def fn(x):
return x.sin() + x.cos()
arg1 = torch.randn(3, 2, device=device)
arg2 = torch.randn(5, 2, device=device)
compiled_fn = torch.compile(fn)
expected1 = compiled_fn(arg1)
# Should cause a recompile
expected2 = compiled_fn(arg2)
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
self._save_and_reload(expected_backends=2, expected_dynamo=1)
compiled_fn = torch.compile(fn)
with torch.compiler.set_stance("fail_on_recompile"):
result1 = compiled_fn(arg1)
result2 = compiled_fn(arg2)
# Because of automatic dynamic, a third random shape should also not cause a recompile
arg3 = torch.randn(7, 2, device=device)
compiled_fn(arg3)
self.assertEqual(result1, expected1)
self.assertEqual(result2, expected2)
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_automatic_dynamo_graph_breaks(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def fn(x, l, r):
if l > r:
return x.sum()
mid = (l + r) // 2
if x.sum() == mid:
return x.sum()
elif x.sum() < mid:
return fn(x, l, mid)
else:
return fn(x, mid + 1, r)
def guard_filter_fn(guards):
return [
guard.guard_type not in ("CLOSURE_MATCH", "FUNCTION_MATCH")
for guard in guards
]
# Saving
compiled_fn = torch._dynamo.optimize(
backend="inductor", guard_filter_fn=guard_filter_fn
)(fn)
N = 10
args_list = [(torch.tensor(x, device=device), 0, N - 1) for x in range(N)]
for args in args_list:
compiled_fn(*args)
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
self._save_and_reload(expected_backends=8, expected_dynamo=1)
compiled_fn = torch._dynamo.optimize(
backend="inductor", guard_filter_fn=guard_filter_fn
)(fn)
with torch.compiler.set_stance("fail_on_recompile"):
for args in args_list:
self.assertEqual(compiled_fn(*args), args[0].sum())
# Should have same number of frames as on cold start
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_automatic_dynamo_lazy_backward(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def fn(x):
return x.sin() + x.cos()
arg1 = torch.randn(3, 2, device=device, requires_grad=True)
arg2 = arg1.clone().detach_().requires_grad_(True)
compiled_fn = torch.compile(fn)
expected1 = compiled_fn(arg1)
expected1.sum().backward()
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
self._save_and_reload(expected_backends=1, expected_dynamo=1)
compiled_fn = torch.compile(fn)
# Run it again, no recompile needed
with torch.compiler.set_stance("fail_on_recompile"):
expected2 = compiled_fn(arg2)
expected2.sum().backward()
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_graph_break_partial_backend(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def fn(x):
y = x.sin()
torch._dynamo.graph_break()
return x.sin() + y
arg1 = torch.randn(3, 2, device=device, requires_grad=True)
arg2 = arg1.clone().detach_().requires_grad_(True)
compiled_fn = torch.compile(fn)
expected1 = compiled_fn(arg1)
expected1.sum().backward()
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
# Remove backends related to resume functions
dynamo_entry = next(iter(PrecompileContext._dynamo_cache_entries.values()))
for code in dynamo_entry.codes:
module = sys.modules[code.python_module]
if code.install_to_global:
# Clear the fn_names from global scope, to simulate a new environment
for fn_name in code.function_names:
module.__dict__.pop(fn_name)
for fn_name in code.function_names:
if "resume" in fn_name:
self.assertEqual(len(code.backend_ids), 1)
# delete the fn from the global scope to simulate a new
backend = code.backend_ids[0]
# Delete the backend associated with the resume function
del PrecompileContext._backend_artifacts_by_key[backend]
self._save_and_reload(expected_backends=1, expected_dynamo=1)
compiled_fn = torch.compile(fn)
# Run it again. There will be a recompile because one of the backends is deleted, but it should
# still work.
expected2 = compiled_fn(arg2)
expected2.sum().backward()
self.assertEqual(expected1, expected2)
# One recompile on a new frame, so total_frames should increase by 1
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames + 1)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_call_function_from_resume(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
mod = torch.nn.Linear(2, 3, device=device)
def foo(x, mod):
pred = mod(x)
compute_loss_helper(pred).backward()
return None
args = (torch.randn(3, 2, device=device), mod)
compiled_fn = torch.compile(foo)
compiled_fn(*args)
total_frames = torch._dynamo.convert_frame.FRAME_COUNTER
self._save_and_reload(expected_backends=1, expected_dynamo=1)
compiled_fn = torch.compile(foo)
# Run it again, no recompile needed
with torch.compiler.set_stance("fail_on_recompile"):
compiled_fn(*args)
self.assertEqual(torch._dynamo.convert_frame.FRAME_COUNTER, total_frames)
@parametrize("device", ("cpu", "cuda", "xpu"))
@torch._dynamo.config.patch(caching_precompile=True)
def test_code_with_generator(self, device):
if device == "cuda" and not HAS_CUDA_AND_TRITON:
raise unittest.SkipTest("Requires CUDA/Triton")
if device == "xpu" and not HAS_XPU_AND_TRITON:
raise unittest.SkipTest("Requires XPU/Triton")
def foo(set_of_x):
if not all(isinstance(s, torch.Tensor) for s in set_of_x):
raise TypeError(
f"Expected all elements of set_of_x to be tensors, got {set_of_x}"
)
return torch.cat(set_of_x, dim=0)
args = ([torch.randn(3, 2, device=device) for _ in range(3)],)
compiled_fn = torch.compile(foo)
compiled_fn(*args)
self._save_and_reload(expected_backends=1, expected_dynamo=1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestPackage |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 3411,
"end": 11484
} | class ____(CanvasCase):
def test_getitem_property_class(self):
assert Signature.task
assert Signature.args
assert Signature.kwargs
assert Signature.options
assert Signature.subtask_type
def test_getitem_property(self):
assert SIG.task == 'TASK'
assert SIG.args == ('A1',)
assert SIG.kwargs == {'K1': 'V1'}
assert SIG.options == {'task_id': 'TASK_ID'}
assert SIG.subtask_type == ''
def test_call(self):
x = Signature('foo', (1, 2), {'arg1': 33}, app=self.app)
x.type = Mock(name='type')
x(3, 4, arg2=66)
x.type.assert_called_with(3, 4, 1, 2, arg1=33, arg2=66)
def test_link_on_scalar(self):
x = Signature('TASK', link=Signature('B'))
assert x.options['link']
x.link(Signature('C'))
assert isinstance(x.options['link'], list)
assert Signature('B') in x.options['link']
assert Signature('C') in x.options['link']
def test_json(self):
x = Signature('TASK', link=Signature('B', app=self.app), app=self.app)
assert x.__json__() == dict(x)
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
x = Signature('TASK', (2, 4), app=self.app)
fun, args = x.__reduce__()
assert fun(*args) == x
def test_replace(self):
x = Signature('TASK', ('A',), {})
assert x.replace(args=('B',)).args == ('B',)
assert x.replace(kwargs={'FOO': 'BAR'}).kwargs == {
'FOO': 'BAR',
}
assert x.replace(options={'task_id': '123'}).options == {
'task_id': '123',
}
def test_set(self):
assert Signature('TASK', x=1).set(task_id='2').options == {
'x': 1, 'task_id': '2',
}
def test_link(self):
x = signature(SIG)
x.link(SIG)
x.link(SIG)
assert SIG in x.options['link']
assert len(x.options['link']) == 1
def test_link_error(self):
x = signature(SIG)
x.link_error(SIG)
x.link_error(SIG)
assert SIG in x.options['link_error']
assert len(x.options['link_error']) == 1
def test_flatten_links(self):
tasks = [self.add.s(2, 2), self.mul.s(4), self.div.s(2)]
tasks[0].link(tasks[1])
tasks[1].link(tasks[2])
assert tasks[0].flatten_links() == tasks
def test_OR(self, subtests):
x = self.add.s(2, 2) | self.mul.s(4)
assert isinstance(x, _chain)
y = self.add.s(4, 4) | self.div.s(2)
z = x | y
assert isinstance(y, _chain)
assert isinstance(z, _chain)
assert len(z.tasks) == 4
with pytest.raises(TypeError):
x | 10
ax = self.add.s(2, 2) | (self.add.s(4) | self.add.s(8))
assert isinstance(ax, _chain)
assert len(ax.tasks), 3 == 'consolidates chain to chain'
with subtests.test('Test chaining with a non-signature object'):
with pytest.raises(TypeError):
assert signature('foo') | None
def test_INVERT(self):
x = self.add.s(2, 2)
x.apply_async = Mock()
x.apply_async.return_value = Mock()
x.apply_async.return_value.get = Mock()
x.apply_async.return_value.get.return_value = 4
assert ~x == 4
x.apply_async.assert_called()
def test_merge_immutable(self):
x = self.add.si(2, 2, foo=1)
args, kwargs, options = x._merge((4,), {'bar': 2}, {'task_id': 3})
assert args == (2, 2)
assert kwargs == {'foo': 1}
assert options == {'task_id': 3}
def test_merge_options__none(self):
sig = self.add.si()
_, _, new_options = sig._merge()
assert new_options is sig.options
_, _, new_options = sig._merge(options=None)
assert new_options is sig.options
@pytest.mark.parametrize("immutable_sig", (True, False))
def test_merge_options__group_id(self, immutable_sig):
# This is to avoid testing the behaviour in `test_set_immutable()`
if immutable_sig:
sig = self.add.si()
else:
sig = self.add.s()
# If the signature has no group ID, it can be set
assert not sig.options
_, _, new_options = sig._merge(options={"group_id": sentinel.gid})
assert new_options == {"group_id": sentinel.gid}
# But if one is already set, the new one is silently ignored
sig.set(group_id=sentinel.old_gid)
_, _, new_options = sig._merge(options={"group_id": sentinel.new_gid})
assert new_options == {"group_id": sentinel.old_gid}
def test_set_immutable(self):
x = self.add.s(2, 2)
assert not x.immutable
x.set(immutable=True)
assert x.immutable
x.set(immutable=False)
assert not x.immutable
def test_election(self):
x = self.add.s(2, 2)
x.freeze('foo')
x.type.app.control = Mock()
r = x.election()
x.type.app.control.election.assert_called()
assert r.id == 'foo'
def test_AsyncResult_when_not_registered(self):
s = signature('xxx.not.registered', app=self.app)
assert s.AsyncResult
def test_apply_async_when_not_registered(self):
s = signature('xxx.not.registered', app=self.app)
assert s._apply_async
def test_keeping_link_error_on_chaining(self):
x = self.add.s(2, 2) | self.mul.s(4)
assert isinstance(x, _chain)
x.link_error(SIG)
assert SIG in x.options['link_error']
t = signature(SIG)
z = x | t
assert isinstance(z, _chain)
assert t in z.tasks
assert not z.options.get('link_error')
assert SIG in z.tasks[0].options['link_error']
assert not z.tasks[2].options.get('link_error')
assert SIG in x.options['link_error']
assert t not in x.tasks
assert not x.tasks[0].options.get('link_error')
z = t | x
assert isinstance(z, _chain)
assert t in z.tasks
assert not z.options.get('link_error')
assert SIG in z.tasks[1].options['link_error']
assert not z.tasks[0].options.get('link_error')
assert SIG in x.options['link_error']
assert t not in x.tasks
assert not x.tasks[0].options.get('link_error')
y = self.add.s(4, 4) | self.div.s(2)
assert isinstance(y, _chain)
z = x | y
assert isinstance(z, _chain)
assert not z.options.get('link_error')
assert SIG in z.tasks[0].options['link_error']
assert not z.tasks[2].options.get('link_error')
assert SIG in x.options['link_error']
assert not x.tasks[0].options.get('link_error')
z = y | x
assert isinstance(z, _chain)
assert not z.options.get('link_error')
assert SIG in z.tasks[3].options['link_error']
assert not z.tasks[1].options.get('link_error')
assert SIG in x.options['link_error']
assert not x.tasks[0].options.get('link_error')
def test_signature_on_error_adds_error_callback(self):
sig = signature('sig').on_error(signature('on_error'))
assert sig.options['link_error'] == [signature('on_error')]
@pytest.mark.parametrize('_id, group_id, chord, root_id, parent_id, group_index', [
('_id', 'group_id', 'chord', 'root_id', 'parent_id', 1),
])
def test_freezing_args_set_in_options(self, _id, group_id, chord, root_id, parent_id, group_index):
sig = self.add.s(1, 1)
sig.freeze(
_id=_id,
group_id=group_id,
chord=chord,
root_id=root_id,
parent_id=parent_id,
group_index=group_index,
)
options = sig.options
assert options['task_id'] == _id
assert options['group_id'] == group_id
assert options['chord'] == chord
assert options['root_id'] == root_id
assert options['parent_id'] == parent_id
assert options['group_index'] == group_index
| test_Signature |
python | pallets__jinja | tests/test_utils.py | {
"start": 4447,
"end": 6367
} | class ____:
def test_lorem_ipsum_markup(self):
"""Test that output of lorem_ipsum is Markup by default."""
assert isinstance(generate_lorem_ipsum(), Markup)
def test_lorem_ipsum_html(self):
"""Test that output of lorem_ipsum is a string_type when not html."""
assert isinstance(generate_lorem_ipsum(html=False), str)
def test_lorem_ipsum_n(self):
"""Test that the n (number of lines) works as expected."""
assert generate_lorem_ipsum(n=0, html=False) == ""
for n in range(1, 50):
assert generate_lorem_ipsum(n=n, html=False).count("\n") == (n - 1) * 2
def test_lorem_ipsum_min(self):
"""Test that at least min words are in the output of each line"""
for _ in range(5):
m = random.randrange(20, 99)
for _ in range(10):
assert generate_lorem_ipsum(n=1, min=m, html=False).count(" ") >= m - 1
def test_lorem_ipsum_max(self):
"""Test that at least max words are in the output of each line"""
for _ in range(5):
m = random.randrange(21, 100)
for _ in range(10):
assert generate_lorem_ipsum(n=1, max=m, html=False).count(" ") < m - 1
def test_missing():
"""Test the repr of missing."""
assert repr(missing) == "missing"
def test_consume():
"""Test that consume consumes an iterator."""
x = iter([1, 2, 3, 4, 5])
consume(x)
with pytest.raises(StopIteration):
next(x)
@pytest.mark.parametrize("protocol", range(pickle.HIGHEST_PROTOCOL + 1))
def test_pickle_missing(protocol: int) -> None:
"""Test that missing can be pickled while remaining a singleton."""
assert pickle.loads(pickle.dumps(missing, protocol)) is missing
def test_copy_missing() -> None:
"""Test that missing can be copied while remaining a singleton."""
assert copy.copy(missing) is missing
| TestLoremIpsum |
python | ray-project__ray | python/ray/util/client/logsclient.py | {
"start": 700,
"end": 4945
} | class ____:
def __init__(self, client_worker: "Worker", metadata: list):
"""Initializes a thread-safe log stream over a Ray Client gRPC channel.
Args:
client_worker: The Ray Client worker that manages this client
metadata: metadata to pass to gRPC requests
"""
self.client_worker = client_worker
self._metadata = metadata
self.request_queue = queue.Queue()
self.log_thread = self._start_logthread()
self.log_thread.start()
self.last_req = None
def _start_logthread(self) -> threading.Thread:
return threading.Thread(target=self._log_main, args=(), daemon=True)
def _log_main(self) -> None:
reconnecting = False
while not self.client_worker._in_shutdown:
if reconnecting:
# Refresh queue and retry last request
self.request_queue = queue.Queue()
if self.last_req:
self.request_queue.put(self.last_req)
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.client_worker.channel)
try:
log_stream = stub.Logstream(
iter(self.request_queue.get, None), metadata=self._metadata
)
except ValueError:
# Trying to use the stub on a cancelled channel will raise
# ValueError. This should only happen when the data client
# is attempting to reset the connection -- sleep and try
# again.
time.sleep(0.5)
continue
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
return
except grpc.RpcError as e:
reconnecting = self._process_rpc_error(e)
if not reconnecting:
return
def _process_rpc_error(self, e: grpc.RpcError) -> bool:
"""
Processes RPC errors that occur while reading from data stream.
Returns True if the error can be recovered from, False otherwise.
"""
if self.client_worker._can_reconnect(e):
if log_once("lost_reconnect_logs"):
logger.warning(
"Log channel is reconnecting. Logs produced while "
"the connection was down can be found on the head "
"node of the cluster in "
"`ray_client_server_[port].out`"
)
logger.debug("Log channel dropped, retrying.")
time.sleep(0.5)
return True
logger.debug("Shutting down log channel.")
if not self.client_worker._in_shutdown:
logger.exception("Unexpected exception:")
return False
def log(self, level: int, msg: str):
"""Log the message from the log stream.
By default, calls logger.log but this can be overridden.
Args:
level: The loglevel of the received log message
msg: The content of the message
"""
logger.log(level=level, msg=msg)
def stdstream(self, level: int, msg: str):
"""Log the stdout/stderr entry from the log stream.
By default, calls print but this can be overridden.
Args:
level: The loglevel of the received log message
msg: The content of the message
"""
print_file = sys.stderr if level == -2 else sys.stdout
print(msg, file=print_file, end="")
def set_logstream_level(self, level: int):
logger.setLevel(level)
req = ray_client_pb2.LogSettingsRequest()
req.enabled = True
req.loglevel = level
self.request_queue.put(req)
self.last_req = req
def close(self) -> None:
self.request_queue.put(None)
if self.log_thread is not None:
self.log_thread.join()
def disable_logs(self) -> None:
req = ray_client_pb2.LogSettingsRequest()
req.enabled = False
self.request_queue.put(req)
self.last_req = req
| LogstreamClient |
python | kamyu104__LeetCode-Solutions | Python/count-beautiful-numbers.py | {
"start": 99,
"end": 1070
} | class ____(object):
def beautifulNumbers(self, l, r):
"""
:type l: int
:type r: int
:rtype: int
"""
def count(x):
s = map(lambda x: ord(x)-ord('0'), str(x))
dp = [collections.defaultdict(int) for _ in xrange(2)]
dp[1][1, 0] = 1
for c in s:
new_dp = [collections.defaultdict(int) for _ in xrange(2)]
for b in xrange(2):
for (mul, total), cnt in dp[b].iteritems():
for x in xrange((c if b else 9)+1):
new_dp[b and x == c][mul*(1 if total == 0 == x else x), total+x] += cnt
dp = new_dp
result = 0
for b in xrange(2):
for (mul, total), cnt in dp[b].iteritems():
if total and mul%total == 0:
result += cnt
return result
return count(r)-count(l-1)
| Solution |
python | Textualize__textual | src/textual/events.py | {
"start": 25553,
"end": 25907
} | class ____(Event, bubble=False):
"""Sent to App when a file delivery fails."""
key: str
"""The delivery key associated with the delivery."""
exception: BaseException
"""The exception that was raised during the delivery."""
name: str | None = None
"""Optional name returned to the app to identify the download."""
| DeliveryFailed |
python | mlflow__mlflow | mlflow/genai/scorers/builtin_scorers.py | {
"start": 63604,
"end": 67942
} | class ____(BuiltInScorer):
"""
Completeness evaluates whether an AI assistant fully addresses all user questions
in a single user prompt.
For evaluating the completeness of a conversation, use the ConversationCompleteness scorer
instead.
This scorer analyzes a single turn of interaction (user input and AI response) to determine
if the AI successfully answered all questions and provided all requested information.
It returns "yes" or "no".
You can invoke the scorer directly with a single input for testing, or pass it to
`mlflow.genai.evaluate` for running full evaluation on a dataset.
Args:
name: The name of the scorer. Defaults to "completeness".
model: {{ model }}
Example (direct usage):
.. code-block:: python
import mlflow
from mlflow.genai.scorers import Completeness
assessment = Completeness(name="my_completeness_check")(
inputs={"question": "What is MLflow and what are its main features?"},
outputs="MLflow is an open-source platform for managing the ML lifecycle.",
)
print(assessment) # Feedback with value "yes" or "no"
Example (with evaluate):
.. code-block:: python
import mlflow
from mlflow.genai.scorers import Completeness
data = [
{
"inputs": {"question": "What is MLflow and what are its main features?"},
"outputs": "MLflow is an open-source platform.",
},
]
result = mlflow.genai.evaluate(data=data, scorers=[Completeness()])
"""
name: str = COMPLETENESS_ASSESSMENT_NAME
model: str | None = None
required_columns: set[str] = {"inputs", "outputs"}
description: str = (
"Evaluate whether the assistant fully addresses all user questions in a single turn."
)
_judge: InstructionsJudge | None = pydantic.PrivateAttr(default=None)
def _get_judge(self) -> InstructionsJudge:
if self._judge is None:
self._judge = InstructionsJudge(
name=self.name,
instructions=self.instructions,
model=self.model,
description=self.description,
feedback_value_type=Literal["yes", "no"],
)
return self._judge
@property
def instructions(self) -> str:
return COMPLETENESS_PROMPT
def get_input_fields(self) -> list[JudgeField]:
return [
JudgeField(
name="inputs",
description=(
"A dictionary of input data, e.g. "
"{'question': 'What is MLflow and what are its main features?'}."
),
),
JudgeField(
name="outputs",
description=(
"The response from the model, e.g. "
"'MLflow is an open-source platform for managing the ML lifecycle.'"
),
),
]
def __call__(
self,
*,
inputs: dict[str, Any] | None = None,
outputs: Any | None = None,
trace: Trace | None = None,
) -> Feedback:
return self._get_judge()(
inputs=inputs,
outputs=outputs,
trace=trace,
)
def get_all_scorers() -> list[BuiltInScorer]:
"""
Returns a list of all built-in scorers.
Example:
.. code-block:: python
import mlflow
from mlflow.genai.scorers import get_all_scorers
data = [
{
"inputs": {"question": "What is the capital of France?"},
"outputs": "The capital of France is Paris.",
"expectations": {"expected_response": "Paris is the capital city of France."},
}
]
result = mlflow.genai.evaluate(data=data, scorers=get_all_scorers())
"""
scorers = [
ExpectationsGuidelines(),
Correctness(),
RelevanceToQuery(),
RetrievalSufficiency(),
RetrievalGroundedness(),
Equivalence(),
UserFrustration(),
ConversationCompleteness(),
Completeness(),
]
if is_databricks_uri(mlflow.get_tracking_uri()):
scorers.extend([Safety(), RetrievalRelevance()])
return scorers
| Completeness |
python | numpy__numpy | numpy/fft/tests/test_helper.py | {
"start": 5167,
"end": 5562
} | class ____:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9 * fft.fftfreq(9), x)
assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10 * fft.fftfreq(10), x)
assert_array_almost_equal(10 * pi * fft.fftfreq(10, pi), x)
| TestFFTFreq |
python | django__django | tests/admin_inlines/admin.py | {
"start": 5687,
"end": 5831
} | class ____(forms.ModelForm):
extra_field = forms.CharField()
class Meta:
model = FootNote
fields = "__all__"
| FootNoteForm |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 108883,
"end": 109053
} | class ____(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
| NoKeyException |
python | pandas-dev__pandas | pandas/core/indexes/accessors.py | {
"start": 16637,
"end": 18047
} | class ____(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
Examples
--------
>>> seconds_series = pd.Series(
... pd.period_range(
... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"
... )
... )
>>> seconds_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
3 2000-01-01 00:00:03
dtype: period[s]
>>> seconds_series.dt.second
0 0
1 1
2 2
3 3
dtype: int64
>>> hours_series = pd.Series(
... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")
... )
>>> hours_series
0 2000-01-01 00:00
1 2000-01-01 01:00
2 2000-01-01 02:00
3 2000-01-01 03:00
dtype: period[h]
>>> hours_series.dt.hour
0 0
1 1
2 2
3 3
dtype: int64
>>> quarters_series = pd.Series(
... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")
... )
>>> quarters_series
0 2000Q1
1 2000Q2
2 2000Q3
3 2000Q4
dtype: period[Q-DEC]
>>> quarters_series.dt.quarter
0 1
1 2
2 3
3 4
dtype: int64
"""
| PeriodProperties |
python | apache__airflow | airflow-core/tests/unit/assets/test_manager.py | {
"start": 1889,
"end": 7311
} | class ____:
def test_register_asset_change_asset_doesnt_exist(self, mock_task_instance):
asset = Asset(uri="asset_doesnt_exist", name="not exist")
mock_session = mock.Mock(spec=Session)
# Gotta mock up the query results
mock_session.scalar.return_value = None
asset_manger = AssetManager()
asset_manger.register_asset_change(
task_instance=mock_task_instance, asset=asset, session=mock_session
)
# Ensure that we have ignored the asset and _not_ created an AssetEvent or
# AssetDagRunQueue rows
mock_session.add.assert_not_called()
mock_session.merge.assert_not_called()
def test_register_asset_change(self, session, dag_maker, mock_task_instance, testing_dag_bundle):
asset_manager = AssetManager()
asset = Asset(uri="test://asset1", name="test_asset_uri", group="asset")
bundle_name = "testing"
dag1 = DagModel(dag_id="dag1", is_stale=False, bundle_name=bundle_name)
dag2 = DagModel(dag_id="dag2", is_stale=False, bundle_name=bundle_name)
session.add_all([dag1, dag2])
asm = AssetModel(uri="test://asset1/", name="test_asset_uri", group="asset")
session.add(asm)
asm.scheduled_dags = [DagScheduleAssetReference(dag_id=dag.dag_id) for dag in (dag1, dag2)]
session.execute(delete(AssetDagRunQueue))
session.flush()
asset_manager.register_asset_change(task_instance=mock_task_instance, asset=asset, session=session)
session.flush()
# Ensure we've created an asset
assert session.query(AssetEvent).filter_by(asset_id=asm.id).count() == 1
assert session.query(AssetDagRunQueue).count() == 2
@pytest.mark.usefixtures("clear_assets")
def test_register_asset_change_with_alias(
self, session, dag_maker, mock_task_instance, testing_dag_bundle
):
bundle_name = "testing"
consumer_dag_1 = DagModel(
dag_id="conumser_1", bundle_name=bundle_name, is_stale=False, fileloc="dag1.py"
)
consumer_dag_2 = DagModel(
dag_id="conumser_2", bundle_name=bundle_name, is_stale=False, fileloc="dag2.py"
)
session.add_all([consumer_dag_1, consumer_dag_2])
asm = AssetModel(uri="test://asset1/", name="test_asset_uri", group="asset")
session.add(asm)
asam = AssetAliasModel(name="test_alias_name", group="test")
session.add(asam)
asam.scheduled_dags = [
DagScheduleAssetAliasReference(alias_id=asam.id, dag_id=dag.dag_id)
for dag in (consumer_dag_1, consumer_dag_2)
]
session.execute(delete(AssetDagRunQueue))
session.flush()
asset = Asset(uri="test://asset1", name="test_asset_uri")
asset_manager = AssetManager()
asset_manager.register_asset_change(
task_instance=mock_task_instance,
asset=asset,
source_alias_names=["test_alias_name"],
session=session,
)
session.flush()
# Ensure we've created an asset
assert session.query(AssetEvent).filter_by(asset_id=asm.id).count() == 1
assert session.query(AssetDagRunQueue).count() == 2
def test_register_asset_change_no_downstreams(self, session, mock_task_instance):
asset_manager = AssetManager()
asset = Asset(uri="test://asset1", name="never_consumed")
asm = AssetModel(uri="test://asset1/", name="never_consumed", group="asset")
session.add(asm)
session.execute(delete(AssetDagRunQueue))
session.flush()
asset_manager.register_asset_change(task_instance=mock_task_instance, asset=asset, session=session)
session.flush()
# Ensure we've created an asset
assert session.query(AssetEvent).filter_by(asset_id=asm.id).count() == 1
assert session.query(AssetDagRunQueue).count() == 0
def test_register_asset_change_notifies_asset_listener(
self, session, mock_task_instance, testing_dag_bundle
):
asset_manager = AssetManager()
asset_listener.clear()
get_listener_manager().add_listener(asset_listener)
bundle_name = "testing"
asset = Asset(uri="test://asset1", name="test_asset_1")
dag1 = DagModel(dag_id="dag3", bundle_name=bundle_name)
session.add(dag1)
asm = AssetModel(uri="test://asset1/", name="test_asset_1", group="asset")
session.add(asm)
asm.scheduled_dags = [DagScheduleAssetReference(dag_id=dag1.dag_id)]
session.flush()
asset_manager.register_asset_change(task_instance=mock_task_instance, asset=asset, session=session)
session.flush()
# Ensure the listener was notified
assert len(asset_listener.changed) == 1
assert asset_listener.changed[0].uri == asset.uri
def test_create_assets_notifies_asset_listener(self, session):
asset_manager = AssetManager()
asset_listener.clear()
get_listener_manager().add_listener(asset_listener)
asset = Asset(uri="test://asset1", name="test_asset_1")
asms = asset_manager.create_assets([asset], session=session)
# Ensure the listener was notified
assert len(asset_listener.created) == 1
assert len(asms) == 1
assert asset_listener.created[0].uri == asset.uri == asms[0].uri
| TestAssetManager |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 140662,
"end": 140800
} | class ____(str, Enum):
PREFIX = "prefix"
WHITESPACE = "whitespace"
WORD = "word"
MULTILINGUAL = "multilingual"
| TokenizerType |
python | sanic-org__sanic | examples/simple_async_view.py | {
"start": 124,
"end": 511
} | class ____(HTTPMethodView):
def get(self, request):
return text("I am get method")
def post(self, request):
return text("I am post method")
def put(self, request):
return text("I am put method")
def patch(self, request):
return text("I am patch method")
def delete(self, request):
return text("I am delete method")
| SimpleView |
python | scikit-image__scikit-image | src/skimage/feature/orb.py | {
"start": 636,
"end": 13148
} | class ____(FeatureDetector, DescriptorExtractor):
"""Oriented FAST and rotated BRIEF feature detector and binary descriptor
extractor.
Parameters
----------
n_keypoints : int, optional
Number of keypoints to be returned. The function will return the best
`n_keypoints` according to the Harris corner response if more than
`n_keypoints` are detected. If not, then all the detected keypoints
are returned.
fast_n : int, optional
The `n` parameter in `skimage.feature.corner_fast`. Minimum number of
consecutive pixels out of 16 pixels on the circle that should all be
either brighter or darker w.r.t test-pixel. A point c on the circle is
darker w.r.t test pixel p if ``Ic < Ip - threshold`` and brighter if
``Ic > Ip + threshold``. Also stands for the n in ``FAST-n`` corner
detector.
fast_threshold : float, optional
The ``threshold`` parameter in ``feature.corner_fast``. Threshold used
to decide whether the pixels on the circle are brighter, darker or
similar w.r.t. the test pixel. Decrease the threshold when more
corners are desired and vice-versa.
harris_k : float, optional
The `k` parameter in `skimage.feature.corner_harris`. Sensitivity
factor to separate corners from edges, typically in range ``[0, 0.2]``.
Small values of `k` result in detection of sharp corners.
downscale : float, optional
Downscale factor for the image pyramid. Default value 1.2 is chosen so
that there are more dense scales which enable robust scale invariance
for a subsequent feature description.
n_scales : int, optional
Maximum number of scales from the bottom of the image pyramid to
extract the features from.
Attributes
----------
keypoints : (N, 2) array
Keypoint coordinates as ``(row, col)``.
scales : (N,) array
Corresponding scales.
orientations : (N,) array
Corresponding orientations in radians.
responses : (N,) array
Corresponding Harris corner responses.
descriptors : (Q, `descriptor_size`) array of dtype bool
2D array of binary descriptors of size `descriptor_size` for Q
keypoints after filtering out border keypoints with value at an
index ``(i, j)`` either being ``True`` or ``False`` representing
the outcome of the intensity comparison for i-th keypoint on j-th
decision pixel-pair. It is ``Q == np.sum(mask)``.
References
----------
.. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
"ORB: An efficient alternative to SIFT and SURF"
http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf
Examples
--------
>>> from skimage.feature import ORB, match_descriptors
>>> img1 = np.zeros((100, 100))
>>> img2 = np.zeros_like(img1)
>>> rng = np.random.default_rng(19481137) # do not copy this value
>>> square = rng.random((20, 20))
>>> img1[40:60, 40:60] = square
>>> img2[53:73, 53:73] = square
>>> detector_extractor1 = ORB(n_keypoints=5)
>>> detector_extractor2 = ORB(n_keypoints=5)
>>> detector_extractor1.detect_and_extract(img1)
>>> detector_extractor2.detect_and_extract(img2)
>>> matches = match_descriptors(detector_extractor1.descriptors,
... detector_extractor2.descriptors)
>>> matches
array([[0, 0],
[1, 1],
[2, 2],
[3, 4],
[4, 3]])
>>> detector_extractor1.keypoints[matches[:, 0]]
array([[59. , 59. ],
[40. , 40. ],
[57. , 40. ],
[46. , 58. ],
[58.8, 58.8]])
>>> detector_extractor2.keypoints[matches[:, 1]]
array([[72., 72.],
[53., 53.],
[70., 53.],
[59., 71.],
[72., 72.]])
"""
def __init__(
self,
downscale=1.2,
n_scales=8,
n_keypoints=500,
fast_n=9,
fast_threshold=0.08,
harris_k=0.04,
):
self.downscale = downscale
self.n_scales = n_scales
self.n_keypoints = n_keypoints
self.fast_n = fast_n
self.fast_threshold = fast_threshold
self.harris_k = harris_k
self.keypoints = None
self.scales = None
self.responses = None
self.orientations = None
self.descriptors = None
def _build_pyramid(self, image):
image = _prepare_grayscale_input_2D(image)
return list(
pyramid_gaussian(
image, self.n_scales - 1, self.downscale, channel_axis=None
)
)
def _detect_octave(self, octave_image):
dtype = octave_image.dtype
# Extract keypoints for current octave
fast_response = corner_fast(octave_image, self.fast_n, self.fast_threshold)
keypoints = corner_peaks(fast_response, min_distance=1)
if len(keypoints) == 0:
return (
np.zeros((0, 2), dtype=dtype),
np.zeros((0,), dtype=dtype),
np.zeros((0,), dtype=dtype),
)
mask = _mask_border_keypoints(octave_image.shape, keypoints, distance=16)
keypoints = keypoints[mask]
orientations = corner_orientations(octave_image, keypoints, OFAST_MASK)
harris_response = corner_harris(octave_image, method='k', k=self.harris_k)
responses = harris_response[keypoints[:, 0], keypoints[:, 1]]
return keypoints, orientations, responses
def detect(self, image):
"""Detect oriented FAST keypoints along with the corresponding scale.
Parameters
----------
image : 2D array
Input image.
"""
check_nD(image, 2)
pyramid = self._build_pyramid(image)
keypoints_list = []
orientations_list = []
scales_list = []
responses_list = []
for octave in range(len(pyramid)):
octave_image = np.ascontiguousarray(pyramid[octave])
if np.squeeze(octave_image).ndim < 2:
# No further keypoints can be detected if the image is not really 2d
break
keypoints, orientations, responses = self._detect_octave(octave_image)
keypoints_list.append(keypoints * self.downscale**octave)
orientations_list.append(orientations)
scales_list.append(
np.full(
keypoints.shape[0],
self.downscale**octave,
dtype=octave_image.dtype,
)
)
responses_list.append(responses)
keypoints = np.vstack(keypoints_list)
orientations = np.hstack(orientations_list)
scales = np.hstack(scales_list)
responses = np.hstack(responses_list)
if keypoints.shape[0] < self.n_keypoints:
self.keypoints = keypoints
self.scales = scales
self.orientations = orientations
self.responses = responses
else:
# Choose best n_keypoints according to Harris corner response
best_indices = responses.argsort()[::-1][: self.n_keypoints]
self.keypoints = keypoints[best_indices]
self.scales = scales[best_indices]
self.orientations = orientations[best_indices]
self.responses = responses[best_indices]
def _extract_octave(self, octave_image, keypoints, orientations):
mask = _mask_border_keypoints(octave_image.shape, keypoints, distance=20)
keypoints = np.array(
keypoints[mask], dtype=np.intp, order='C', copy=NP_COPY_IF_NEEDED
)
orientations = np.array(orientations[mask], order='C', copy=False)
descriptors = _orb_loop(octave_image, keypoints, orientations)
return descriptors, mask
def extract(self, image, keypoints, scales, orientations):
"""Extract rBRIEF binary descriptors for given keypoints in image.
Note that the keypoints must be extracted using the same `downscale`
and `n_scales` parameters. Additionally, if you want to extract both
keypoints and descriptors you should use the faster
`detect_and_extract`.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint coordinates as ``(row, col)``.
scales : (N,) array
Corresponding scales.
orientations : (N,) array
Corresponding orientations in radians.
"""
check_nD(image, 2)
pyramid = self._build_pyramid(image)
descriptors_list = []
mask_list = []
# Determine octaves from scales
octaves = (np.log(scales) / np.log(self.downscale)).astype(np.intp)
for octave in range(len(pyramid)):
# Mask for all keypoints in current octave
octave_mask = octaves == octave
if np.sum(octave_mask) > 0:
octave_image = np.ascontiguousarray(pyramid[octave])
octave_keypoints = keypoints[octave_mask]
octave_keypoints /= self.downscale**octave
octave_orientations = orientations[octave_mask]
descriptors, mask = self._extract_octave(
octave_image, octave_keypoints, octave_orientations
)
descriptors_list.append(descriptors)
mask_list.append(mask)
self.descriptors = np.vstack(descriptors_list).view(bool)
self.mask_ = np.hstack(mask_list)
def detect_and_extract(self, image):
"""Detect oriented FAST keypoints and extract rBRIEF descriptors.
Note that this is faster than first calling `detect` and then
`extract`.
Parameters
----------
image : 2D array
Input image.
"""
check_nD(image, 2)
pyramid = self._build_pyramid(image)
keypoints_list = []
responses_list = []
scales_list = []
orientations_list = []
descriptors_list = []
for octave in range(len(pyramid)):
octave_image = np.ascontiguousarray(pyramid[octave])
if np.squeeze(octave_image).ndim < 2:
# No further keypoints can be detected if the image is not really 2d
break
keypoints, orientations, responses = self._detect_octave(octave_image)
if len(keypoints) == 0:
keypoints_list.append(keypoints)
responses_list.append(responses)
descriptors_list.append(np.zeros((0, 256), dtype=bool))
continue
descriptors, mask = self._extract_octave(
octave_image, keypoints, orientations
)
scaled_keypoints = keypoints[mask] * self.downscale**octave
keypoints_list.append(scaled_keypoints)
responses_list.append(responses[mask])
orientations_list.append(orientations[mask])
scales_list.append(
self.downscale**octave
* np.ones(scaled_keypoints.shape[0], dtype=np.intp)
)
descriptors_list.append(descriptors)
if len(scales_list) == 0:
raise RuntimeError(
"ORB found no features. Try passing in an image containing "
"greater intensity contrasts between adjacent pixels."
)
keypoints = np.vstack(keypoints_list)
responses = np.hstack(responses_list)
scales = np.hstack(scales_list)
orientations = np.hstack(orientations_list)
descriptors = np.vstack(descriptors_list).view(bool)
if keypoints.shape[0] < self.n_keypoints:
self.keypoints = keypoints
self.scales = scales
self.orientations = orientations
self.responses = responses
self.descriptors = descriptors
else:
# Choose best n_keypoints according to Harris corner response
best_indices = responses.argsort()[::-1][: self.n_keypoints]
self.keypoints = keypoints[best_indices]
self.scales = scales[best_indices]
self.orientations = orientations[best_indices]
self.responses = responses[best_indices]
self.descriptors = descriptors[best_indices]
| ORB |
python | pytorch__pytorch | torch/_dynamo/symbolic_convert.py | {
"start": 210505,
"end": 216368
} | class ____(InliningInstructionTranslator):
generated_items: list[VariableTracker]
# Flag whether or not the InlineGenerator should consume the entire iterator
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.generated_items = []
self.generator_exhausted = False
self.is_generator_from_ctx_manager = False
def should_compile_partial_graph(self) -> bool:
# resuming on graph break on inlined generator not supported
return False
def YIELD_VALUE(self, inst: Instruction) -> None:
top = self.pop()
self.generated_items.append(top)
if len(self.generated_items) > MAX_ITERATOR_LIMIT:
raise exc.InfiniteGeneratorError(
"Too many yield values in generator. Maybe you are inlining an infinite generator. "
f"If not, please report a bug at {PT2_ISSUE_TRACKER_URL}",
)
self.push(ConstantVariable.create(None))
if (
config.enable_faithful_generator_behavior
or self.is_generator_from_ctx_manager
):
self.symbolic_result = top
# Stop tracing
raise YieldValueOp
def GET_YIELD_FROM_ITER(self, inst: Instruction) -> None:
tos = self.stack[-1]
if not isinstance(tos, ListIteratorVariable):
self.pop()
res = BuiltinVariable(iter).call_function(self, [tos], {}) # type: ignore[arg-type]
self.push(res)
def RETURN_VALUE(self, inst: Instruction) -> None:
self.generator_exhausted = True
return super().RETURN_VALUE(inst)
def RETURN_CONST(self, inst: Instruction) -> None:
self.generator_exhausted = True
return super().RETURN_CONST(inst)
def YIELD_FROM(self, inst: Instruction) -> None:
assert len(self.stack) >= 2
val = self.pop()
tos = self.stack[-1]
if not (isinstance(val, ConstantVariable) and val.value is None):
# invoke send
# Unreachable code - if you hit this, you are implementing generator support and have
# lifted the `unimplemented("generator")` in frame conversion. This codepath handles
# subgenerator and lines up with this line in Python 3.10
# https://github.com/python/cpython/blob/3.10/Python/ceval.c#L2599
unimplemented(
gb_type="Unreachable sub-generator code",
context="",
explanation="Should only be encountered while implementing generator support.",
hints=[],
)
try:
val = tos.next_variable(self)
except (StopIteration, exc.ObservedUserStopIteration) as ex:
if isinstance(ex, exc.ObservedUserStopIteration):
exc.handle_observed_exception(self)
# The iterator is exhausted. Stop the loop and return.
self.pop()
self.push(ConstantVariable.create(ex.value))
else:
# Repeat the YIELD_FROM instruction in the next eval loop
assert (
isinstance(self.instruction_pointer, int)
and self.instruction_pointer > 0
)
self.instruction_pointer -= 1
self.push(val)
# Add the value to yield into generated_items and replace the top of the stack with None
self.YIELD_VALUE(inst)
def SEND(self, inst: Instruction) -> None:
assert len(self.stack) >= 2
val = self.pop()
tos = self.stack[-1]
if isinstance(tos, (IteratorVariable, LocalGeneratorObjectVariable)) or (
isinstance(tos, UserDefinedObjectVariable)
and isinstance(tos.value, collections.abc.Iterator)
):
if isinstance(val, ConstantVariable) and val.value is None:
try:
val = tos.next_variable(self) # type: ignore[arg-type]
except (StopIteration, exc.ObservedUserStopIteration) as ex:
# To implement SEND, we have to look at the implementation
# when the iterator returns StopIteration. This translates to this code
# 3.11: https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2613-L2619
# 3.12: https://github.com/python/cpython/blob/3.12/Python/bytecodes.c#L863-L866
# The implementation is different in 3.11 and 3.12. In 3.12, we rely
# on END_SEND to clean up. In 3.11, SEND does the cleanup as well.
if sys.version_info < (3, 12):
self.pop() # Python 3.12 uses new opcode END_SEND
self.push(ConstantVariable.create(ex.value))
self.jump(inst)
else:
self.push(val)
else:
# invoke send
# Unreachable code - if you hit this, you are implementing generator support and have
# lifted the `unimplemented("generator")` in frame conversion. This codepath handles
# subgenerator and lines up with this line in Python 3.11
# https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2597
unimplemented(
gb_type="Unreachable sub-generator code",
context="",
explanation="Should only be encountered while implementing generator support.",
hints=[],
)
else:
unimplemented(
gb_type="SEND with bad type",
context=f"TOS type: {typestr(tos)}",
explanation=f"Attempted to SEND with unsupported type {typestr(tos)}.",
hints=[],
)
| InliningGeneratorInstructionTranslator |
python | altair-viz__altair | altair/vegalite/v6/api.py | {
"start": 145119,
"end": 148040
} | class ____(channels._EncodingMixin):
data: Any
def facet(
self,
facet: Optional[str | Facet] = Undefined,
row: Optional[str | FacetFieldDef | Row] = Undefined,
column: Optional[str | FacetFieldDef | Column] = Undefined,
data: Optional[ChartDataType] = Undefined,
columns: Optional[int] = Undefined,
**kwargs: Any,
) -> FacetChart:
"""
Create a facet chart from the current chart.
Faceted charts require data to be specified at the top level; if data
is not specified, the data from the current chart will be used at the
top level.
Parameters
----------
facet : string, Facet (optional)
The data column to use as an encoding for a wrapped facet.
If specified, then neither row nor column may be specified.
column : string, Column, FacetFieldDef (optional)
The data column to use as an encoding for a column facet.
May be combined with row argument, but not with facet argument.
row : string or Row, FacetFieldDef (optional)
The data column to use as an encoding for a row facet.
May be combined with column argument, but not with facet argument.
data : string or dataframe (optional)
The dataset to use for faceting. If not supplied, then data must
be specified in the top-level chart that calls this method.
columns : integer
the maximum number of columns for a wrapped facet.
Returns
-------
self :
for chaining
"""
facet_specified = facet is not Undefined
rowcol_specified = row is not Undefined or column is not Undefined
if facet_specified and rowcol_specified:
msg = "facet argument cannot be combined with row/column argument."
raise ValueError(msg)
self = _top_schema_base(self)
if data is Undefined:
if self.data is Undefined:
msg = (
"Facet charts require data to be specified at the top level. "
"If you are trying to facet layered or concatenated charts, "
"ensure that the same data variable is passed to each chart "
"or specify the data inside the facet method instead."
)
raise ValueError(msg)
self = _top_schema_base(self).copy(deep=False)
data, self.data = self.data, Undefined
f: Facet | FacetMapping
if not utils.is_undefined(facet):
f = channels.Facet(facet) if isinstance(facet, str) else facet
else:
r: Any = row
f = FacetMapping(row=r, column=column)
return FacetChart(spec=self, facet=f, data=data, columns=columns, **kwargs) # pyright: ignore[reportArgumentType]
| _EncodingMixin |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 96195,
"end": 112210
} | class ____:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self,
location: str | None = None,
metadata: _MetadataType = None,
project_name: str | None = None,
version: str | None = None,
py_version: str | None = PY_MAJOR,
platform: str | None = None,
precedence: int = EGG_DIST,
):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(
cls,
location: str,
basename: StrPath,
metadata: _MetadataType = None,
**kw: int, # We could set `precedence` explicitly, but keeping this as `**kw` for full backwards and subclassing compatibility
) -> Distribution:
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location,
metadata,
project_name=project_name,
version=version,
py_version=py_version,
platform=platform,
**kw,
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self._forgiving_parsed_version,
self.precedence,
self.key,
self.location,
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other: Distribution):
return self.hashcmp < other.hashcmp
def __le__(self, other: Distribution):
return self.hashcmp <= other.hashcmp
def __gt__(self, other: Distribution):
return self.hashcmp > other.hashcmp
def __ge__(self, other: Distribution):
return self.hashcmp >= other.hashcmp
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other: object):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
try:
self._parsed_version = parse_version(self.version)
except _packaging_version.InvalidVersion as ex:
info = f"(package: {self.project_name})"
if hasattr(ex, "add_note"):
ex.add_note(info) # PEP 678
raise
raise _packaging_version.InvalidVersion(f"{str(ex)} {info}") from None
return self._parsed_version
@property
def _forgiving_parsed_version(self):
try:
return self.parsed_version
except _packaging_version.InvalidVersion as ex:
self._parsed_version = parse_version(_forgiving_version(self.version))
notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678
msg = f"""!!\n\n
*************************************************************************
{str(ex)}\n{notes}
This is a long overdue deprecation.
For the time being, `pkg_resources` will use `{self._parsed_version}`
as a replacement to avoid breaking existing environments,
but no future compatibility is guaranteed.
If you maintain package {self.project_name} you should implement
the relevant changes to adequate the project to PEP 440 immediately.
*************************************************************************
\n\n!!
"""
warnings.warn(msg, DeprecationWarning)
return self._parsed_version
@property
def version(self):
try:
return self._version
except AttributeError as e:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
self.PKG_INFO, path
)
raise ValueError(msg, self) from e
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm: dict[str | None, list[Requirement]]):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra: str | None = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker) or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras: Iterable[str] = ()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps: list[Requirement] = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError as e:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
) from e
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
yield from self.get_metadata_lines(name)
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
return _version_from_file(lines)
def activate(self, path: list[str] | None = None, replace: bool = False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path and self.location is not None:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name),
to_filename(self.version),
self.py_version or PY_MAJOR,
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super().__dir__())
| set(attr for attr in self._provider.__dir__() if not attr.startswith('_'))
)
@classmethod
def from_filename(
cls,
filename: StrPath,
metadata: _MetadataType = None,
**kw: int, # We could set `precedence` explicitly, but keeping this as `**kw` for full backwards and subclassing compatibility
):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata, **kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, _packaging_version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group: str, name: str) -> _ResolvedEntryPoint:
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
@overload
def get_entry_map(self, group: None = None) -> dict[str, dict[str, EntryPoint]]: ...
@overload
def get_entry_map(self, group: str) -> dict[str, EntryPoint]: ...
def get_entry_map(self, group: str | None = None):
"""Return the entry point map for `group`, or the full entry map"""
if not hasattr(self, "_ep_map"):
self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return self._ep_map.get(group, {})
return self._ep_map
def get_entry_info(self, group: str, name: str):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
# FIXME: 'Distribution.insert_on' is too complex (13)
def insert_on( # noqa: C901
self,
path: list[str],
loc=None,
replace: bool = False,
):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (
modname not in sys.modules
or modname in nsp
or modname in _namespace_packages
):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (
normalize_path(fn).startswith(loc) or fn.startswith(self.location)
):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
except SystemError:
# TODO: remove this except clause when python/cpython#103632 is fixed.
return False
return True
def clone(self, **kw: str | int | IResourceProvider | None):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
# Unsafely unpacking. But keeping **kw for backwards and subclassing compatibility
return self.__class__(**kw) # type:ignore[arg-type]
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
| Distribution |
python | PrefectHQ__prefect | tests/server/models/test_task_run_states.py | {
"start": 10069,
"end": 11068
} | class ____:
async def test_delete_task_run_state(self, db, task_run, session):
# create a task run to read
task_run_state = (
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(),
)
).state
assert await models.task_run_states.delete_task_run_state(
db, session=session, task_run_state_id=task_run_state.id
)
# make sure the task run state is deleted
result = await models.task_run_states.read_task_run_state(
session=session, task_run_state_id=task_run_state.id
)
assert result is None
async def test_delete_task_run_state_returns_false_if_does_not_exist(
self, db, session
):
result = await models.task_run_states.delete_task_run_state(
db, session=session, task_run_state_id=uuid4()
)
assert not result
| TestDeleteTaskRunState |
python | django__django | tests/select_related/models.py | {
"start": 1999,
"end": 2314
} | class ____(models.Model):
tag = models.CharField(max_length=30)
content_type = models.ForeignKey(
ContentType, models.CASCADE, related_name="select_related_tagged_items"
)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
| TaggedItem |
python | python-poetry__poetry | tests/utils/env/test_env.py | {
"start": 1259,
"end": 20183
} | class ____(VirtualEnv):
def __init__(
self,
path: Path,
base: Path | None = None,
sys_path: list[str] | None = None,
) -> None:
super().__init__(path, base=base)
self._sys_path = sys_path
@property
def sys_path(self) -> list[str]:
if self._sys_path is not None:
return self._sys_path
return super().sys_path
def test_virtualenvs_with_spaces_in_their_path_work_as_expected(
tmp_path: Path, manager: EnvManager
) -> None:
venv_path = tmp_path / "Virtual Env"
manager.build_venv(venv_path)
venv = VirtualEnv(venv_path)
assert venv.run("python", "-V").startswith("Python")
def test_env_commands_with_spaces_in_their_arg_work_as_expected(
tmp_path: Path, manager: EnvManager
) -> None:
venv_path = tmp_path / "Virtual Env"
manager.build_venv(venv_path)
venv = VirtualEnv(venv_path)
output = venv.run("python", str(venv.pip), "--version")
assert re.match(r"pip \S+ from", output)
@pytest.mark.parametrize("differing_platform", [True, False])
def test_env_get_supported_tags_matches_inside_virtualenv(
tmp_path: Path, manager: EnvManager, mocker: MockerFixture, differing_platform: bool
) -> None:
venv_path = tmp_path / "Virtual Env"
manager.build_venv(venv_path)
venv = VirtualEnv(venv_path)
run_python_script_spy = mocker.spy(venv, "run_python_script")
# determine expected tags before patching sysconfig!
expected_tags = list(packaging.tags.sys_tags())
if differing_platform:
mocker.patch("sysconfig.get_platform", return_value="some_other_platform")
expected_call_count = 2
else:
expected_call_count = 1
assert venv.get_supported_tags() == expected_tags
assert run_python_script_spy.call_count == expected_call_count
@pytest.mark.skipif(
sys.implementation.name != "cpython",
reason="free threading is only relevant for CPython",
)
def test_env_get_supported_tags_free_threading(
tmp_path: Path, manager: EnvManager
) -> None:
venv_path = tmp_path / "Virtual Env"
manager.build_venv(venv_path)
venv = VirtualEnv(venv_path)
if venv.marker_env["free_threading"]:
assert venv.get_supported_tags() == list(packaging.tags.sys_tags())
else:
assert not any(t.abi.endswith("t") for t in venv.get_supported_tags())
venv.marker_env["free_threading"] = True
assert any(t.abi.endswith("t") for t in venv.get_supported_tags())
@pytest.mark.skipif(os.name == "nt", reason="Symlinks are not support for Windows")
def test_env_has_symlinks_on_nix(tmp_path: Path, tmp_venv: VirtualEnv) -> None:
assert os.path.islink(tmp_venv.python)
def test_run_with_keyboard_interrupt(
tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture
) -> None:
mocker.patch("subprocess.check_output", side_effect=KeyboardInterrupt())
with pytest.raises(KeyboardInterrupt):
tmp_venv.run("python", "-c", MINIMAL_SCRIPT)
subprocess.check_output.assert_called_once() # type: ignore[attr-defined]
def test_call_with_keyboard_interrupt(
tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture
) -> None:
mocker.patch("subprocess.check_call", side_effect=KeyboardInterrupt())
kwargs = {"call": True}
with pytest.raises(KeyboardInterrupt):
tmp_venv.run("python", "-", **kwargs)
subprocess.check_call.assert_called_once() # type: ignore[attr-defined]
def test_run_with_called_process_error(
tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture
) -> None:
mocker.patch(
"subprocess.check_output",
side_effect=subprocess.CalledProcessError(
42, "some_command", "some output", "some error"
),
)
with pytest.raises(EnvCommandError) as error:
tmp_venv.run("python", "-c", MINIMAL_SCRIPT)
subprocess.check_output.assert_called_once() # type: ignore[attr-defined]
assert "some output" in str(error.value)
assert "some error" in str(error.value)
def test_call_no_input_with_called_process_error(
tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture
) -> None:
mocker.patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
42, "some_command", "some output", "some error"
),
)
kwargs = {"call": True}
with pytest.raises(EnvCommandError) as error:
tmp_venv.run("python", "-", **kwargs)
subprocess.check_call.assert_called_once() # type: ignore[attr-defined]
assert "some output" in str(error.value)
assert "some error" in str(error.value)
def test_check_output_with_called_process_error(
tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture
) -> None:
mocker.patch(
"subprocess.check_output",
side_effect=subprocess.CalledProcessError(
42, "some_command", "some output", "some error"
),
)
with pytest.raises(EnvCommandError) as error:
tmp_venv.run("python", "-")
subprocess.check_output.assert_called_once() # type: ignore[attr-defined]
assert "some output" in str(error.value)
assert "some error" in str(error.value)
@pytest.mark.parametrize("out", ["sys.stdout", "sys.stderr"])
def test_call_does_not_block_on_full_pipe(
tmp_path: Path, tmp_venv: VirtualEnv, out: str
) -> None:
"""see https://github.com/python-poetry/poetry/issues/7698"""
script = tmp_path / "script.py"
script.write_text(
f"""\
import sys
for i in range(10000):
print('just print a lot of text to fill the buffer', file={out})
""",
encoding="utf-8",
)
def target(result: list[int]) -> None:
tmp_venv.run("python", str(script), call=True)
result.append(0)
results: list[int] = []
# use a separate thread, so that the test does not block in case of error
thread = Thread(target=target, args=(results,))
thread.start()
thread.join(1) # must not block
assert results and results[0] == 0
def test_run_python_script_called_process_error(
    tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture
) -> None:
    """``run_python_script`` reports subprocess failures as EnvCommandError."""
    failure = subprocess.CalledProcessError(
        42, "some_command", "some output", "some error"
    )
    mocker.patch("subprocess.run", side_effect=failure)

    with pytest.raises(EnvCommandError) as exc_info:
        tmp_venv.run_python_script(MINIMAL_SCRIPT)

    message = str(exc_info.value)
    assert "some output" in message
    assert "some error" in message
def test_run_python_script_only_stdout(tmp_path: Path, tmp_venv: VirtualEnv) -> None:
    """Only stdout comes back from run_python_script; stderr is not mixed in."""
    captured = tmp_venv.run_python_script(
        "import sys; print('some warning', file=sys.stderr); print('some output')"
    )
    assert "some output" in captured
    assert "some warning" not in captured
def test_system_env_has_correct_paths() -> None:
    """SystemEnv exposes the expected install-scheme paths."""
    env = SystemEnv(Path(sys.prefix))
    paths = env.paths
    for scheme in ("purelib", "platlib", "scripts"):
        assert paths.get(scheme) is not None
    assert env.site_packages.path == Path(paths["purelib"])
    assert paths["include"] is not None
@pytest.mark.parametrize(
    "enabled",
    [True, False],
)
def test_system_env_usersite(mocker: MockerFixture, enabled: bool) -> None:
    """usersite is populated exactly when the user site is enabled."""
    mocker.patch("site.check_enableusersite", return_value=enabled)
    env = SystemEnv(Path(sys.prefix))
    if enabled:
        assert env.usersite is not None
    else:
        assert env.usersite is None
def test_venv_has_correct_paths(tmp_venv: VirtualEnv) -> None:
    """A virtualenv reports its scheme paths, with a venv-local include dir."""
    paths = tmp_venv.paths
    for scheme in ("purelib", "platlib", "scripts"):
        assert paths.get(scheme) is not None
    assert tmp_venv.site_packages.path == Path(paths["purelib"])
    major, minor = tmp_venv.version_info[0], tmp_venv.version_info[1]
    expected_include = tmp_venv.path.joinpath(f"include/site/python{major}.{minor}")
    assert paths["include"] == str(expected_include)
@pytest.mark.parametrize("with_system_site_packages", [True, False])
def test_env_system_packages(
    tmp_path: Path, poetry: Poetry, with_system_site_packages: bool
) -> None:
    """The system-site-packages flag is written to pyvenv.cfg and read back."""
    venv_path = tmp_path / "venv"
    pyvenv_cfg = venv_path / "pyvenv.cfg"
    EnvManager(poetry).build_venv(
        path=venv_path, flags={"system-site-packages": with_system_site_packages}
    )
    env = VirtualEnv(venv_path)
    expected_line = (
        f"include-system-site-packages = {str(with_system_site_packages).lower()}"
    )
    assert expected_line in pyvenv_cfg.read_text(encoding="utf-8")
    assert env.includes_system_site_packages is with_system_site_packages
def test_generic_env_system_packages(poetry: Poetry) -> None:
    """https://github.com/python-poetry/poetry/issues/8646"""
    base_env = GenericEnv(Path(sys.base_prefix))
    # A GenericEnv over the base interpreter is not a venv with system packages.
    assert not base_env.includes_system_site_packages
@pytest.mark.parametrize("with_system_site_packages", [True, False])
def test_env_system_packages_are_relative_to_lib(
    tmp_path: Path, poetry: Poetry, with_system_site_packages: bool
) -> None:
    """is_path_relative_to_lib() separates the venv's lib dirs from others."""
    venv_path = tmp_path / "venv"
    EnvManager(poetry).build_venv(
        path=venv_path, flags={"system-site-packages": with_system_site_packages}
    )
    env = VirtualEnv(venv_path)

    # Poetry's own dependencies must never be considered part of the venv's
    # lib directory. Checking a single distribution is sufficient.
    own_dist = next(iter(metadata.distributions()), None)
    if own_dist is None:
        pytest.fail("No distributions found in Poetry's own environment")
    assert not env.is_path_relative_to_lib(
        Path(str(own_dist._path))  # type: ignore[attr-defined]
    )

    # Packages of the virtualenv's base environment (its system site packages)
    # count as "relative to lib" exactly when system-site-packages is enabled.
    # Again, one package is enough.
    base_dist = next(iter(env.parent_env.site_packages.distributions()), None)
    if base_dist is None:
        pytest.fail("No distributions found in the base environment of the virtualenv")
    assert (
        env.is_path_relative_to_lib(
            Path(str(base_dist._path))  # type: ignore[attr-defined]
        )
        is with_system_site_packages
    )
@pytest.mark.parametrize(
    ("flags", "packages"),
    [
        ({"no-pip": False}, {"pip"}),
        ({"no-pip": True}, set()),
        ({}, set()),
    ],
)
def test_env_no_pip(
    tmp_path: Path, poetry: Poetry, flags: dict[str, str | bool], packages: set[str]
) -> None:
    """pip is only seeded into the venv when the no-pip flag is False."""
    venv_path = tmp_path / "venv"
    EnvManager(poetry).build_venv(path=venv_path, flags=flags)
    env = VirtualEnv(venv_path)
    repo = InstalledRepository.load(env=env, with_dependencies=True)

    installed: set[str] = set()
    for package in repo.packages:
        # workaround for BSD test environments
        if package.name == "sqlite3":
            continue
        installed.add(package.name)

    assert installed == packages
def test_env_finds_the_correct_executables(tmp_path: Path, manager: EnvManager) -> None:
    """Executable discovery falls back to major-versioned names.

    Builds a venv, then — when both the plain and major-versioned binaries
    exist — removes the plain ``python``/``pip`` so re-reading the venv must
    resolve the major-versioned name instead.
    """
    # Note: the venv path deliberately contains a space.
    venv_path = tmp_path / "Virtual Env"
    manager.build_venv(venv_path, with_pip=True)
    venv = VirtualEnv(venv_path)
    # Candidate names: plain ("python"/"pip") vs major-versioned ("python3"/"pip3").
    default_executable = expected_executable = f"python{'.exe' if WINDOWS else ''}"
    default_pip_executable = expected_pip_executable = f"pip{'.exe' if WINDOWS else ''}"
    major_executable = f"python{sys.version_info[0]}{'.exe' if WINDOWS else ''}"
    major_pip_executable = f"pip{sys.version_info[0]}{'.exe' if WINDOWS else ''}"
    # If both python binaries exist, delete the plain one to force the fallback.
    if (
        venv._bin_dir.joinpath(default_executable).exists()
        and venv._bin_dir.joinpath(major_executable).exists()
    ):
        venv._bin_dir.joinpath(default_executable).unlink()
        expected_executable = major_executable
    # Same for pip.
    if (
        venv._bin_dir.joinpath(default_pip_executable).exists()
        and venv._bin_dir.joinpath(major_pip_executable).exists()
    ):
        venv._bin_dir.joinpath(default_pip_executable).unlink()
        expected_pip_executable = major_pip_executable
    # Re-instantiate so discovery runs against the mutated bin directory.
    venv = VirtualEnv(venv_path)
    assert Path(venv.python).name == expected_executable
    # Only the name prefix is compared for pip (it may carry a version suffix).
    assert Path(venv.pip).name.startswith(expected_pip_executable.split(".")[0])
def test_env_finds_the_correct_executables_for_generic_env(
    tmp_path: Path, manager: EnvManager
) -> None:
    """A GenericEnv wrapping a child venv resolves versioned executables."""
    venv_path = tmp_path / "Virtual Env"
    child_venv_path = tmp_path / "Child Virtual Env"
    manager.build_venv(venv_path, with_pip=True)
    parent_venv = VirtualEnv(venv_path)
    manager.build_venv(child_venv_path, executable=parent_venv.python, with_pip=True)
    venv = GenericEnv(parent_venv.path, child_env=VirtualEnv(child_venv_path))

    major, minor = sys.version_info[0], sys.version_info[1]
    if WINDOWS:
        expected_executable = "python.exe"
        expected_pip_executable = "pip.exe"
    else:
        expected_executable = f"python{major}.{minor}"
        expected_pip_executable = f"pip{major}.{minor}"

    assert Path(venv.python).name == expected_executable
    assert Path(venv.pip).name == expected_pip_executable
def test_env_finds_fallback_executables_for_generic_env(
    tmp_path: Path, manager: EnvManager
) -> None:
    """GenericEnv falls back from minor- to major- to unversioned executables.

    Progressively unlinks the more specific ``python``/``pip`` binaries from
    the env's bin directory and checks that executable discovery picks the
    next-most-specific name that still exists.
    """
    venv_path = tmp_path / "Virtual Env"
    child_venv_path = tmp_path / "Child Virtual Env"
    manager.build_venv(venv_path, with_pip=True)
    parent_venv = VirtualEnv(venv_path)
    manager.build_venv(child_venv_path, executable=parent_venv.python, with_pip=True)
    venv = GenericEnv(parent_venv.path, child_env=VirtualEnv(child_venv_path))
    # Candidate python names, least to most specific.
    default_executable = f"python{'.exe' if WINDOWS else ''}"
    major_executable = f"python{sys.version_info[0]}{'.exe' if WINDOWS else ''}"
    minor_executable = (
        f"python{sys.version_info[0]}.{sys.version_info[1]}{'.exe' if WINDOWS else ''}"
    )
    expected_executable = minor_executable
    # Remove the minor-versioned binary (when a major-versioned one also
    # exists) so discovery must fall back one step.
    if (
        venv._bin_dir.joinpath(expected_executable).exists()
        and venv._bin_dir.joinpath(major_executable).exists()
    ):
        venv._bin_dir.joinpath(expected_executable).unlink()
        expected_executable = major_executable
    # Then remove the major-versioned binary too, falling back to "python".
    if (
        venv._bin_dir.joinpath(expected_executable).exists()
        and venv._bin_dir.joinpath(default_executable).exists()
    ):
        venv._bin_dir.joinpath(expected_executable).unlink()
        expected_executable = default_executable
    # The same fallback chain for pip.
    default_pip_executable = f"pip{'.exe' if WINDOWS else ''}"
    major_pip_executable = f"pip{sys.version_info[0]}{'.exe' if WINDOWS else ''}"
    minor_pip_executable = (
        f"pip{sys.version_info[0]}.{sys.version_info[1]}{'.exe' if WINDOWS else ''}"
    )
    expected_pip_executable = minor_pip_executable
    if (
        venv._bin_dir.joinpath(expected_pip_executable).exists()
        and venv._bin_dir.joinpath(major_pip_executable).exists()
    ):
        venv._bin_dir.joinpath(expected_pip_executable).unlink()
        expected_pip_executable = major_pip_executable
    if (
        venv._bin_dir.joinpath(expected_pip_executable).exists()
        and venv._bin_dir.joinpath(default_pip_executable).exists()
    ):
        venv._bin_dir.joinpath(expected_pip_executable).unlink()
        expected_pip_executable = default_pip_executable
    # If the expected binary ended up missing entirely, settle for the default.
    if not venv._bin_dir.joinpath(expected_executable).exists():
        expected_executable = default_executable
    if not venv._bin_dir.joinpath(expected_pip_executable).exists():
        expected_pip_executable = default_pip_executable
    # Re-create the env so discovery runs against the mutated bin directory.
    venv = GenericEnv(parent_venv.path, child_env=VirtualEnv(child_venv_path))
    assert Path(venv.python).name == expected_executable
    assert Path(venv.pip).name == expected_pip_executable
@pytest.fixture
def extended_without_setup_poetry(
    fixture_dir: FixtureDirGetter, set_project_context: SetProjectContext
) -> Iterator[Poetry]:
    """Poetry instance for the ``extended_project_without_setup`` fixture project.

    Yields (rather than returns) so the project context stays active for the
    duration of the test.
    """
    with set_project_context("extended_project_without_setup") as cwd:
        yield Factory().create_poetry(cwd)
def test_build_environment_called_build_script_specified(
    mocker: MockerFixture,
    extended_without_setup_poetry: Poetry,
) -> None:
    """With a build script, build_environment provisions an ephemeral env
    and installs the build requirements into it."""
    install_mock = mocker.patch("poetry.utils.isolated_build.IsolatedEnv.install")

    with ephemeral_environment() as project_env:
        import poetry.utils.env

        ephemeral_spy = mocker.spy(poetry.utils.env, "ephemeral_environment")

        with build_environment(extended_without_setup_poetry, project_env):
            assert install_mock.call_count == 1
            assert install_mock.call_args == mocker.call(["poetry-core", "cython"])
            assert ephemeral_spy.call_count == 1
def test_build_environment_not_called_without_build_script_specified(
    mocker: MockerFixture, poetry: Poetry, tmp_path: Path
) -> None:
    """Without a build script, the project env itself is used unchanged."""
    project_env = MockEnv(path=tmp_path / "project")
    ephemeral_env = MockEnv(path=tmp_path / "ephemeral")
    patched = mocker.patch("poetry.utils.env.ephemeral_environment")
    patched.return_value.__enter__.return_value = ephemeral_env

    with build_environment(poetry, project_env) as env:
        assert env == project_env
        assert not env.executed  # type: ignore[attr-defined]
def test_command_from_bin_preserves_relative_path(manager: EnvManager) -> None:
    # https://github.com/python-poetry/poetry/issues/7959
    env = manager.get()
    assert env.get_command_from_bin("./foo.py") == ["./foo.py"]
@pytest.fixture
def system_env_read_only(system_env: SystemEnv, mocker: MockerFixture) -> SystemEnv:
    """system_env with every install-scheme directory reported non-writable."""
    real_is_dir_writable = is_dir_writable
    read_only = {system_env.paths[key] for key in SCHEME_NAMES}

    def fake_is_dir_writable(path: Path, create: bool = False) -> bool:
        # Scheme dirs are "read only"; everything else behaves normally.
        return str(path) not in read_only and real_is_dir_writable(path, create)

    mocker.patch("poetry.utils.env.base_env.is_dir_writable", new=fake_is_dir_writable)
    return system_env
def test_env_scheme_dict_returns_original_when_writable(system_env: SystemEnv) -> None:
    """With writable scheme dirs, scheme_dict matches paths unchanged."""
    diff = DeepDiff(system_env.scheme_dict, system_env.paths, ignore_order=True)
    assert not diff
def test_env_scheme_dict_returns_modified_when_read_only(
    system_env_read_only: SystemEnv,
) -> None:
    """Read-only scheme dirs make scheme_dict redirect under the user base."""
    scheme_dict = system_env_read_only.scheme_dict
    paths = system_env_read_only.paths
    assert DeepDiff(scheme_dict, paths, ignore_order=True)
    for scheme in SCHEME_NAMES:
        redirected = scheme_dict[scheme]
        assert Path(redirected).exists()
        assert redirected.startswith(paths["userbase"])
def test_marker_env_is_equal_for_all_envs(tmp_path: Path, manager: EnvManager) -> None:
    """Venv, generic and system envs all report the same marker_env."""
    venv_path = tmp_path / "Virtual Env"
    manager.build_venv(venv_path)
    venv = VirtualEnv(venv_path)

    flavours = [venv, GenericEnv(venv.path), SystemEnv(Path(sys.prefix))]
    marker_envs = [env.marker_env for env in flavours]

    assert marker_envs[0] == marker_envs[1]
    assert marker_envs[0] == marker_envs[2]
| MockVirtualEnv |
python | encode__django-rest-framework | tests/models.py | {
"start": 1571,
"end": 1865
} | class ____(RESTFrameworkModel):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, related_name='sources',
help_text='Target', verbose_name='Target',
on_delete=models.CASCADE)
| ForeignKeySource |
python | plotly__plotly.py | plotly/graph_objs/layout/polar/radialaxis/_tickfont.py | {
"start": 235,
"end": 9940
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.polar.radialaxis"
_path_str = "layout.polar.radialaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.polar.r
adialaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.polar.radialaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | getsentry__sentry | tests/sentry/integrations/github/test_integration.py | {
"start": 4036,
"end": 86349
} | class ____(IntegrationTestCase):
provider = GitHubIntegrationProvider
base_url = "https://api.github.com"
def setUp(self) -> None:
super().setUp()
self.installation_id = "install_1"
self.user_id = "user_1"
self.app_id = "app_1"
self.access_token = "xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
self.expires_at = "3000-01-01T00:00:00Z"
self._stub_github()
plugins.register(GitHubPlugin)
def tearDown(self) -> None:
responses.reset()
plugins.unregister(GitHubPlugin)
super().tearDown()
def _setup_assignee_sync_test(
self,
user_email: str = "foo@example.com",
external_name: str = "@octocat",
external_id: str = "octocat",
issue_key: str = "Test-Organization/foo#123",
create_external_user: bool = True,
) -> tuple:
"""
Common setup for assignee sync tests.
Returns:
tuple: (user, installation, external_issue, integration, group)
"""
user = serialize_rpc_user(self.create_user(email=user_email))
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
integration.metadata.update(
{
"access_token": self.access_token,
"expires_at": self.expires_at,
}
)
integration.save()
installation = get_installation_of_type(
GitHubIntegration, integration, self.organization.id
)
group = self.create_group()
if create_external_user:
self.create_external_user(
user=user,
organization=self.organization,
integration=integration,
provider=ExternalProviders.GITHUB.value,
external_name=external_name,
external_id=external_id,
)
external_issue = self.create_integration_external_issue(
group=group,
integration=integration,
key=issue_key,
)
return user, installation, external_issue, integration, group
@pytest.fixture(autouse=True)
def stub_get_jwt(self):
with mock.patch.object(client, "get_jwt", return_value="jwt_token_1"):
yield
@pytest.fixture(autouse=True)
def stub_get_jwt_function(self):
with mock.patch("sentry.integrations.github.utils.get_jwt", return_value="jwt_token_1"):
yield
def _stub_github(self):
"""This stubs the calls related to a Github App"""
self.gh_org = "Test-Organization"
pp = 1
access_token = "xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
responses.add(
responses.POST,
"https://github.com/login/oauth/access_token",
body=f"access_token={access_token}",
)
responses.add(responses.GET, self.base_url + "/user", json={"login": "octocat"})
responses.add(
responses.POST,
self.base_url + f"/app/installations/{self.installation_id}/access_tokens",
json={
"token": self.access_token,
"expires_at": self.expires_at,
"permissions": {
"administration": "read",
"contents": "read",
"issues": "write",
"metadata": "read",
"pull_requests": "read",
},
"repository_selection": "all",
},
)
repositories: dict[str, Any] = {
"xyz": {
"name": "xyz",
"full_name": "Test-Organization/xyz",
"default_branch": "master",
},
"foo": {
"id": 1296269,
"name": "foo",
"full_name": "Test-Organization/foo",
"default_branch": "master",
},
"bar": {
"id": 9876574,
"name": "bar",
"full_name": "Test-Organization/bar",
"default_branch": "main",
},
"baz": {
"id": 1276555,
"name": "baz",
"full_name": "Test-Organization/baz",
"default_branch": "master",
},
"archived": {
"archived": True,
},
}
self.repositories = repositories
len_repos = len(repositories)
api_url = f"{self.base_url}/installation/repositories"
first = f'<{api_url}?per_page={pp}&page=1>; rel="first"'
last = f'<{api_url}?per_page={pp}&page={len_repos}>; rel="last"'
def gen_link(page: int, text: str) -> str:
return f'<{api_url}?per_page={pp}&page={page}>; rel="{text}"'
responses.add(
responses.GET,
url=api_url,
match=[responses.matchers.query_param_matcher({"per_page": pp})],
json={"total_count": len_repos, "repositories": [repositories["foo"]]},
headers={"link": ", ".join([gen_link(2, "next"), last])},
)
responses.add(
responses.GET,
url=self.base_url + "/installation/repositories",
match=[responses.matchers.query_param_matcher({"per_page": pp, "page": 2})],
json={"total_count": len_repos, "repositories": [repositories["bar"]]},
headers={"link": ", ".join([gen_link(1, "prev"), gen_link(3, "next"), last, first])},
)
responses.add(
responses.GET,
url=self.base_url + "/installation/repositories",
match=[responses.matchers.query_param_matcher({"per_page": pp, "page": 3})],
json={"total_count": len_repos, "repositories": [repositories["baz"]]},
headers={"link": ", ".join([gen_link(2, "prev"), first])},
)
# This is for when we're not testing the pagination logic
responses.add(
responses.GET,
url=self.base_url + "/installation/repositories",
match=[responses.matchers.query_param_matcher({"per_page": 100})],
json={
"total_count": len(repositories),
"repositories": [repo for repo in repositories.values()],
},
)
responses.add(
responses.GET,
self.base_url + f"/app/installations/{self.installation_id}",
json={
"id": self.installation_id,
"app_id": self.app_id,
"account": {
"id": 60591805,
"login": "Test Organization",
"avatar_url": "http://example.com/avatar.png",
"html_url": "https://github.com/Test-Organization",
"type": "Organization",
},
},
)
responses.add(responses.GET, self.base_url + "/repos/Test-Organization/foo/hooks", json=[])
# Mock response from GH /users/memberships endpoint
# (what is this user's role in each org, with this integration installed on)
responses.add(
responses.GET,
f"{self.base_url}/user/memberships/orgs",
json=[
{
"state": "active",
"role": "admin",
"organization": {
"login": "santry",
"id": 1,
"avatar_url": "https://all-the.bufo.zone/bufo-adding-bugs-to-the-code.gif",
},
},
{
"state": "disabled",
"role": "admin",
"organization": {
"login": "bufo-bot",
"id": 2,
"avatar_url": "https://all-the.bufo.zone/bufo-achieving-coding-flow.png",
},
},
{
"state": "active",
"role": "member",
"organization": {
"login": "poggers-org",
"id": 3,
"avatar_url": "https://all-the.bufo.zone/bufo-bonk.png",
},
},
],
)
# Logic to get a tree for a repo
# https://api.github.com/repos/getsentry/sentry/git/trees/master?recursive=1
for repo_name, values in TREE_RESPONSES.items():
responses.add(
responses.GET,
f"{self.base_url}/repos/Test-Organization/{repo_name}/git/trees/{repositories[repo_name]['default_branch']}?recursive=1",
json=values["body"],
status=values["status_code"],
)
def _setup_with_existing_installations(self):
self.installation_info = {
"installations": [
{
"id": 1,
"target_type": "Organization",
"account": {
"login": "santry",
"avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pitchforks.png",
},
},
{
"id": 2,
"target_type": "User",
"account": {
"login": "bufo-bot",
"avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pog.png",
},
},
]
}
responses.add(
responses.GET,
f"{self.base_url}/user/installations",
json=self.installation_info,
)
def _setup_without_existing_installations(self):
responses.add(
responses.GET,
f"{self.base_url}/user/installations",
json={"installations": []},
)
def _setup_select_github_organization(self):
resp = self.client.get(self.init_path)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "github.com"
assert redirect.path == "/login/oauth/authorize"
assert (
redirect.query
== f"client_id=github-client-id&state={self.pipeline.signature}&redirect_uri=http://testserver/extensions/github/setup/"
)
# We just got resp. from GH, continue with OAuthView -> GithubOrganizationSelection
resp = self.client.get(
"{}?{}".format(
self.setup_path,
urlencode({"code": "12345678901234567890", "state": self.pipeline.signature}),
)
)
assert resp.status_code == 200
return resp
def _setup_choose_installation(self, installation_id: str):
resp = self.client.get(
"{}?{}".format(
self.setup_path,
urlencode(
{
"code": "12345678901234567890",
"state": self.pipeline.signature,
"chosen_installation_id": installation_id,
}
),
)
)
return resp
def assert_setup_flow(self):
self._setup_with_existing_installations()
# Initiate the OAuthView
self._setup_select_github_organization()
# We rendered the GithubOrganizationSelection UI and the user chose to skip
resp = self._setup_choose_installation("-1")
# GitHubInstallation redirects user to GH to choose a new organization
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "github.com"
assert redirect.path == "/apps/sentry-test-app"
# Send to GitHubInstallation the chosen organization
resp = self.client.get(
"{}?{}".format(self.setup_path, urlencode({"installation_id": self.installation_id}))
)
auth_header = responses.calls[4].request.headers["Authorization"]
assert auth_header == "Bearer jwt_token_1"
assert (
responses.calls[4].request.url
== f"https://api.github.com/app/installations/{self.installation_id}"
)
self.assertDialogSuccess(resp)
return resp
@responses.activate
def test_plugin_migration(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name="Test-Organization/foo",
url="https://github.com/Test-Organization/foo",
provider="github",
external_id=123,
config={"name": "Test-Organization/foo"},
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name="Not-My-Org/other",
provider="github",
external_id=321,
config={"name": "Not-My-Org/other"},
)
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
with assume_test_silo_mode(SiloMode.REGION):
# Updates the existing Repository to belong to the new Integration
assert Repository.objects.get(id=accessible_repo.id).integration_id == integration.id
# Doesn't touch Repositories not accessible by the new Integration
assert Repository.objects.get(id=inaccessible_repo.id).integration_id is None
@responses.activate
def test_basic_flow(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == self.installation_id
assert integration.name == "Test Organization"
assert integration.metadata == {
"access_token": self.access_token,
# The metadata doesn't get saved with the timezone "Z" character
"expires_at": self.expires_at[:-1],
"icon": "http://example.com/avatar.png",
"domain_name": "github.com/Test-Organization",
"account_type": "Organization",
"account_id": 60591805,
"permissions": {
"administration": "read",
"contents": "read",
"issues": "write",
"metadata": "read",
"pull_requests": "read",
},
}
oi = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
assert oi.config == {}
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_installation_not_found(self, mock_record: MagicMock) -> None:
# Add a 404 for an org to responses
responses.replace(
responses.GET, self.base_url + f"/app/installations/{self.installation_id}", status=404
)
# Attempt to install integration
resp = self.client.get(
"{}?{}".format(self.setup_path, urlencode({"installation_id": self.installation_id}))
)
resp = self.client.get(
"{}?{}".format(
self.setup_path,
urlencode(
{"code": "12345678901234567890", "state": "ddd023d87a913d5226e2a882c4c4cc05"}
),
)
)
assert b"Invalid state" in resp.content
assert_failure_metric(mock_record, GitHubInstallationError.INVALID_STATE)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@override_options({"github-app.webhook-secret": ""})
def test_github_user_mismatch(self, mock_record: MagicMock) -> None:
self._stub_github()
self._setup_without_existing_installations()
# Emulate GitHub installation
init_path_1 = "{}?{}".format(
reverse(
"sentry-organization-integrations-setup",
kwargs={
"organization_slug": self.organization.slug,
"provider_id": self.provider.key,
},
),
urlencode({"installation_id": self.installation_id}),
)
self.client.get(init_path_1)
webhook_event = orjson.loads(INSTALLATION_EVENT_EXAMPLE)
webhook_event["installation"]["id"] = self.installation_id
webhook_event["sender"]["login"] = "attacker"
resp = self.client.post(
path="/extensions/github/webhook/",
data=orjson.dumps(webhook_event),
content_type="application/json",
HTTP_X_GITHUB_EVENT="installation",
HTTP_X_HUB_SIGNATURE="sha1=d184e6717f8bfbcc291ebc8c0756ee446c6c9486",
HTTP_X_GITHUB_DELIVERY="00000000-0000-4000-8000-1234567890ab",
)
assert resp.status_code == 204
# Validate the installation user
user_2 = self.create_user("foo@example.com")
org_2 = self.create_organization(name="Rowdy Tiger", owner=user_2)
self.login_as(user_2)
init_path_2 = "{}?{}".format(
reverse(
"sentry-organization-integrations-setup",
kwargs={
"organization_slug": org_2.slug,
"provider_id": self.provider.key,
},
),
urlencode({"installation_id": self.installation_id}),
)
setup_path_2 = "{}?{}".format(
self.setup_path,
urlencode({"code": "12345678901234567890", "state": self.pipeline.signature}),
)
with self.feature({"system:multi-region": True}):
resp = self.client.get(init_path_2)
resp = self.client.get(setup_path_2)
self.assertTemplateUsed(resp, "sentry/integrations/github-integration-failed.html")
assert resp.status_code == 200
assert b'window.opener.postMessage({"success":false' in resp.content
assert b"Authenticated user is not the same as who installed the app" in resp.content
assert_failure_metric(mock_record, GitHubInstallationError.USER_MISMATCH)
    @responses.activate
    def test_disable_plugin_when_fully_migrated(self) -> None:
        """The legacy `github` plugin is disabled on a project once all of its
        GitHub repos are covered by the new integration."""
        self._stub_github()
        with assume_test_silo_mode(SiloMode.REGION):
            project = Project.objects.create(organization_id=self.organization.id)
            plugin = plugins.get("github")
            plugin.enable(project)
            # Accessible to new Integration - mocked in _stub_github
            Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="github",
                external_id="123",
                config={"name": "Test-Organization/foo"},
            )
        # Enabled before
        assert "github" in [p.slug for p in plugins.for_project(project)]
        with self.tasks():
            self.assert_setup_flow()
        # Disabled after Integration installed
        assert "github" not in [p.slug for p in plugins.for_project(project)]
    @responses.activate
    def test_get_repositories_search_param(self) -> None:
        """get_repositories(query) hits the GitHub search API scoped to the org
        and maps results to identifier/name/default_branch dicts."""
        with self.tasks():
            self.assert_setup_flow()
        querystring = urlencode({"q": "fork:true org:Test Organization ex"})
        responses.add(
            responses.GET,
            f"{self.base_url}/search/repositories?{querystring}",
            json={
                "items": [
                    {"name": "example", "full_name": "test/example", "default_branch": "master"},
                    {"name": "exhaust", "full_name": "test/exhaust", "default_branch": "master"},
                ]
            },
        )
        integration = Integration.objects.get(provider=self.provider.key)
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        # This searches for any repositories matching the term 'ex'
        result = installation.get_repositories("ex")
        assert result == [
            {"identifier": "test/example", "name": "example", "default_branch": "master"},
            {"identifier": "test/exhaust", "name": "exhaust", "default_branch": "master"},
        ]
    @responses.activate
    def test_get_repositories_all_and_pagination(self) -> None:
        """Fetch all repositories and test the pagination logic."""
        with self.tasks():
            self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        # page_size=1 forces one API page per repo, exercising pagination
        with patch.object(sentry.integrations.github.client.GitHubBaseClient, "page_size", 1):
            result = installation.get_repositories()
        assert result == [
            {"name": "foo", "identifier": "Test-Organization/foo", "default_branch": "master"},
            {"name": "bar", "identifier": "Test-Organization/bar", "default_branch": "main"},
            {"name": "baz", "identifier": "Test-Organization/baz", "default_branch": "master"},
        ]
    @responses.activate
    def test_get_repositories_only_first_page(self) -> None:
        """Only the first page of repositories is returned when the client's
        page_number_limit is 1."""
        with self.tasks():
            self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        # page_size=1 puts one repo per page; page_number_limit=1 stops after page 1
        with (
            patch.object(
                sentry.integrations.github.client.GitHubBaseClient, "page_number_limit", 1
            ),
            patch.object(sentry.integrations.github.client.GitHubBaseClient, "page_size", 1),
        ):
            result = installation.get_repositories()
        assert result == [
            {"name": "foo", "identifier": "Test-Organization/foo", "default_branch": "master"},
        ]
    @responses.activate
    def test_get_stacktrace_link_file_exists(self) -> None:
        """A blob URL pinned to the requested version is returned when the file
        exists at that ref (HEAD request succeeds)."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/foo"},
                integration_id=integration.id,
            )
        path = "README.md"
        version = "1234567"
        default = "master"
        responses.add(
            responses.HEAD,
            self.base_url + f"/repos/{repo.name}/contents/{path}?ref={version}",
        )
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        result = installation.get_stacktrace_link(repo, path, default, version)
        assert result == "https://github.com/Test-Organization/foo/blob/1234567/README.md"
    @responses.activate
    def test_get_stacktrace_link_file_doesnt_exists(self) -> None:
        """No link is returned when the file is missing at the requested ref
        (HEAD request 404s) and the ref equals the default branch."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/foo"},
                integration_id=integration.id,
            )
        path = "README.md"
        version = "master"
        default = "master"
        responses.add(
            responses.HEAD,
            self.base_url + f"/repos/{repo.name}/contents/{path}?ref={version}",
            status=404,
        )
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        result = installation.get_stacktrace_link(repo, path, default, version)
        assert not result
    @responses.activate
    def test_get_stacktrace_link_use_default_if_version_404(self) -> None:
        """Falls back to the default branch when the file is missing at the
        requested version but present on the default branch."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/foo"},
                integration_id=integration.id,
            )
        path = "README.md"
        version = "12345678"
        default = "master"
        # First check (specific version) 404s ...
        responses.add(
            responses.HEAD,
            self.base_url + f"/repos/{repo.name}/contents/{path}?ref={version}",
            status=404,
        )
        # ... second check (default branch) succeeds
        responses.add(
            responses.HEAD,
            self.base_url + f"/repos/{repo.name}/contents/{path}?ref={default}",
        )
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        result = installation.get_stacktrace_link(repo, path, default, version)
        assert result == "https://github.com/Test-Organization/foo/blob/master/README.md"
    @responses.activate
    def test_get_message_from_error(self) -> None:
        """message_from_error maps a 404 ApiError to the generic message, and
        appends a push-your-commits hint for compare-commit URLs."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        base_error = f"Error Communicating with GitHub (HTTP 404): {API_ERRORS[404]}"
        assert (
            installation.message_from_error(
                ApiError("Not Found", code=404, url="https://api.github.com/repos/scefali")
            )
            == base_error
        )
        # A 404 on a .../compare/<sha>...<sha> URL gets the extra commit hint
        url = "https://api.github.com/repos/scefali/sentry-integration-example/compare/2adcab794f6f57efa8aa84de68a724e728395792...e208ee2d71e8426522f95efbdae8630fa66499ab"
        assert (
            installation.message_from_error(ApiError("Not Found", code=404, url=url))
            == base_error
            + f" Please also confirm that the commits associated with the following URL have been pushed to GitHub: {url}"
        )
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_github_prevent_install_until_pending_deletion_is_complete(
        self, mock_record: MagicMock
    ) -> None:
        """A new install is blocked while an OrganizationIntegration is pending
        deletion, and succeeds again once the old records are fully deleted."""
        self._stub_github()
        # First installation should be successful
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        oi = OrganizationIntegration.objects.get(
            integration=integration, organization_id=self.organization.id
        )
        # set installation to pending deletion
        oi.status = ObjectStatus.PENDING_DELETION
        oi.save()
        # New Installation
        self.installation_id = "1"
        self._stub_github()
        mock_record.reset_mock()
        with self.feature({"system:multi-region": True}):
            self._setup_select_github_organization()
            resp = self._setup_choose_installation(str(self.installation_id))
            assert resp.status_code == 200
            self.assertTemplateUsed(resp, "sentry/integrations/github-integration-failed.html")
            assert b'window.opener.postMessage({"success":false' in resp.content
            assert f', "{generate_organization_url(self.organization.slug)}");'.encode() in resp.content
            # Assert payload returned to main window
            assert (
                b'{"success":false,"data":{"error":"GitHub installation pending deletion."}}'
                in resp.content
            )
            assert_failure_metric(mock_record, GitHubInstallationError.PENDING_DELETION)
            # Delete the original Integration
            oi.delete()
            integration.delete()
            # Try again and should be successful
            self._setup_select_github_organization()
            resp = self._setup_choose_installation(str(self.installation_id))
        self.assertDialogSuccess(resp)
        integration = Integration.objects.get(external_id=self.installation_id)
        assert integration.provider == "github"
        assert OrganizationIntegration.objects.filter(
            organization_id=self.organization.id, integration=integration
        ).exists()
    def set_rate_limit(
        self, remaining=MINIMUM_REQUESTS + 100, limit=5000, json_body=None, status=200
    ):
        """Helper method to register a mocked GitHub rate-limit response.

        A status code different than 200 requires a json_body.
        """
        response_json = (
            json_body
            if status != 200
            else {
                "resources": {
                    "core": {"limit": limit, "remaining": remaining, "used": "foo", "reset": 123},
                    "graphql": {
                        "limit": limit,
                        "remaining": remaining,
                        "used": "foo",
                        "reset": 123,
                    },
                }
            }
        )
        # upsert: it calls add() if not existent, otherwise it calls replace()
        responses.upsert(
            responses.GET, "https://api.github.com/rate_limit", json=response_json, status=status
        )
    def get_installation_helper(self) -> GitHubIntegration:
        """Run the setup flow and return the resulting GitHubIntegration installation."""
        with self.tasks():
            self.assert_setup_flow()  # This somehow creates the integration
        integration = Integration.objects.get(provider=self.provider.key)
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        return installation
def _expected_trees(self, repo_info_list=None):
result = {}
# bar and baz are defined to fail, thus, do not show up in the default case
list = repo_info_list or [
("xyz", "master", ["src/xyz.py"]),
("foo", "master", ["src/sentry/api/endpoints/auth_login.py"]),
]
for repo, branch, files in list:
result[f"{self.gh_org}/{repo}"] = RepoTree(
RepoAndBranch(f"{self.gh_org}/{repo}", branch), files
)
return result
def _expected_cached_repos(self):
return [
{"full_name": f"{self.gh_org}/xyz", "default_branch": "master"},
{"full_name": f"{self.gh_org}/foo", "default_branch": "master"},
{"full_name": f"{self.gh_org}/bar", "default_branch": "main"},
{"full_name": f"{self.gh_org}/baz", "default_branch": "master"},
]
@responses.activate
def test_get_trees_for_org_works(self) -> None:
"""Fetch the tree representation of a repo"""
installation = self.get_installation_helper()
cache.clear()
self.set_rate_limit()
expected_trees = self._expected_trees()
repos_key = f"githubtrees:repositories:{self.organization.id}"
repo_key = lambda x: f"github:repo:Test-Organization/{x}:source-code"
# Check that the cache is clear
assert cache.get(repos_key) is None
assert cache.get(repo_key("foo")) is None
trees = installation.get_trees_for_org()
assert cache.get(repos_key) == self._expected_cached_repos()
assert cache.get(repo_key("foo")) == ["src/sentry/api/endpoints/auth_login.py"]
assert trees == expected_trees
# Calling a second time should produce the same results
trees = installation.get_trees_for_org()
assert trees == expected_trees
    @responses.activate
    def test_get_trees_for_org_prevent_exhaustion_some_repos(self) -> None:
        """Some repos will hit the network but the rest will grab from the cache."""
        repos_key = f"githubtrees:repositories:{self.organization.id}"
        cache.clear()
        installation = self.get_installation_helper()
        expected_trees = self._expected_trees(
            [
                ("xyz", "master", ["src/xyz.py"]),
                # foo will have no files because we will hit the minimum remaining requests floor
                ("foo", "master", []),
                ("bar", "main", []),
                ("baz", "master", []),
            ]
        )
        with patch(
            "sentry.integrations.source_code_management.repo_trees.MINIMUM_REQUESTS_REMAINING",
            new=5,
            autospec=False,
        ):
            # We start with one request left before reaching the minimum remaining requests floor
            self.set_rate_limit(remaining=6)
            assert cache.get(repos_key) is None
            trees = installation.get_trees_for_org()
            assert trees == expected_trees
            assert cache.get(repos_key) == self._expected_cached_repos()
            # Another call should not make us lose the files for xyz
            self.set_rate_limit(remaining=5)
            trees = installation.get_trees_for_org()
            assert trees == expected_trees  # xyz will have files but not foo
            # We reset the remaining values
            self.set_rate_limit(remaining=20)
            trees = installation.get_trees_for_org()
            assert trees == self._expected_trees(
                [
                    ("xyz", "master", ["src/xyz.py"]),
                    # Now that the rate limit is reset we should get files for foo
                    ("foo", "master", ["src/sentry/api/endpoints/auth_login.py"]),
                ]
            )
    @responses.activate
    def test_get_trees_for_org_rate_limit_401(self) -> None:
        """Sometimes the rate limit API fails from the get go."""
        # Generic test set up
        cache.clear()  # TODO: Investigate why it did not work in the setUp method
        installation = self.get_installation_helper()
        # None of the repos will have any files since rate limit will fail
        # with a 401 response (which makes no sense)
        self.set_rate_limit(json_body={"message": "Bad credentials"}, status=401)
        trees = installation.get_trees_for_org()
        assert trees == self._expected_trees(
            [
                ("xyz", "master", []),
                ("foo", "master", []),
                ("bar", "main", []),
                ("baz", "master", []),
            ]
        )
        # This time the rate limit will not fail, thus, it will fetch the trees
        self.set_rate_limit()
        trees = installation.get_trees_for_org()
        assert trees == self._expected_trees(
            [
                ("xyz", "master", ["src/xyz.py"]),
                ("foo", "master", ["src/sentry/api/endpoints/auth_login.py"]),
            ]
        )
        # This time we will get a 401 but we will load from the cache (unlike the first time)
        self.set_rate_limit(json_body={"message": "Bad credentials"}, status=401)
        trees = installation.get_trees_for_org()
        assert trees == self._expected_trees(
            [
                ("xyz", "master", ["src/xyz.py"]),
                ("foo", "master", ["src/sentry/api/endpoints/auth_login.py"]),
                ("bar", "main", []),
                ("baz", "master", []),
            ]
        )
    @responses.activate
    def test_get_trees_for_org_makes_API_requests_before_MAX_CONNECTION_ERRORS_is_hit(self) -> None:
        """
        If some requests fail, but `MAX_CONNECTION_ERRORS` isn't hit, requests will continue
        to be made to the API.
        """
        installation = self.get_installation_helper()
        self.set_rate_limit()
        # Given that below we mock MAX_CONNECTION_ERRORS to be 2, the error we hit here
        # should NOT force the remaining repos to pull from the cache.
        responses.replace(
            responses.GET,
            f"{self.base_url}/repos/Test-Organization/xyz/git/trees/master?recursive=1",
            body=ApiError("Server Error"),
        )
        # Clear the cache so we can tell when we're pulling from it rather than from an
        # API call
        cache.clear()
        with patch(
            "sentry.integrations.source_code_management.repo_trees.MAX_CONNECTION_ERRORS",
            new=2,
        ):
            trees = installation.get_trees_for_org()
            assert trees == self._expected_trees(
                [
                    # xyz is missing because its request errors
                    # foo has data because its API request is made in spite of xyz's error
                    ("foo", "master", ["src/sentry/api/endpoints/auth_login.py"]),
                    # bar and baz are missing because their API requests throw errors for
                    # other reasons in the default mock responses
                ]
            )
    @responses.activate
    def test_get_trees_for_org_falls_back_to_cache_once_MAX_CONNECTION_ERRORS_is_hit(self) -> None:
        """Once `MAX_CONNECTION_ERRORS` requests fail, the rest will grab from the cache."""
        installation = self.get_installation_helper()
        self.set_rate_limit()
        # Given that below we mock MAX_CONNECTION_ERRORS to be 1, the error we hit here
        # should force the remaining repos to pull from the cache.
        responses.replace(
            responses.GET,
            f"{self.base_url}/repos/Test-Organization/xyz/git/trees/master?recursive=1",
            body=ApiError("Server Error"),
        )
        # Clear the cache so we can tell when we're pulling from it rather than from an
        # API call
        cache.clear()
        with patch(
            "sentry.integrations.source_code_management.repo_trees.MAX_CONNECTION_ERRORS",
            new=1,
        ):
            trees = installation.get_trees_for_org()
            assert trees == self._expected_trees(
                [
                    # xyz isn't here because the request errors out.
                    # foo, bar, and baz are here but have no files, because xyz's error
                    # caused us to pull from the empty cache
                    ("foo", "master", []),
                    ("bar", "main", []),
                    ("baz", "master", []),
                ]
            )
    @responses.activate
    def test_get_commit_context_all_frames(self) -> None:
        """GraphQL blame data for a source line is mapped to FileBlameInfo with
        a parsed commit date and author details."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="github",
                external_id=123,
                config={"name": "Test-Organization/foo"},
                integration_id=integration.id,
            )
        self.set_rate_limit()
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        file = SourceLineInfo(
            path="src/github.py",
            lineno=10,
            ref="master",
            repo=repo,
            code_mapping=None,  # type: ignore[arg-type]
        )
        # Mocked GraphQL blame response; the nesting mirrors the aliased query
        # (repository0 -> ref0 -> target -> blame0 -> ranges)
        responses.add(
            responses.POST,
            url="https://api.github.com/graphql",
            json={
                "data": {
                    "repository0": {
                        "ref0": {
                            "target": {
                                "blame0": {
                                    "ranges": [
                                        {
                                            "commit": {
                                                "oid": "123",
                                                "author": {
                                                    "name": "Foo",
                                                    "email": "foo@example.com",
                                                },
                                                "message": "hello",
                                                "committedDate": "2023-01-01T00:00:00Z",
                                            },
                                            "startingLine": 10,
                                            "endingLine": 15,
                                            "age": 0,
                                        },
                                    ]
                                },
                            }
                        }
                    }
                }
            },
            content_type="application/json",
            status=200,
        )
        response = installation.get_commit_context_all_frames([file], extra={})
        assert response == [
            FileBlameInfo(
                **asdict(file),
                commit=CommitInfo(
                    commitId="123",
                    commitMessage="hello",
                    committedDate=datetime(2023, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
                    commitAuthorEmail="foo@example.com",
                    commitAuthorName="Foo",
                ),
            )
        ]
@responses.activate
def test_source_url_matches(self) -> None:
installation = self.get_installation_helper()
test_cases = [
(
"https://github.com/Test-Organization/sentry/blob/master/src/sentry/integrations/github/integration.py",
True,
),
(
"https://notgithub.com/Test-Organization/sentry/blob/master/src/sentry/integrations/github/integration.py",
False,
),
("https://jianyuan.io", False),
]
for source_url, matches in test_cases:
assert installation.source_url_matches(source_url) == matches
    @responses.activate
    def test_extract_branch_from_source_url(self) -> None:
        """The branch segment is extracted from a GitHub blob URL."""
        installation = self.get_installation_helper()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/repo",
                url="https://github.com/Test-Organization/repo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/repo"},
                integration_id=integration.id,
            )
        source_url = "https://github.com/Test-Organization/repo/blob/master/src/sentry/integrations/github/integration.py"
        assert installation.extract_branch_from_source_url(repo, source_url) == "master"
    @responses.activate
    def test_extract_source_path_from_source_url(self) -> None:
        """The in-repo file path is extracted from a GitHub blob URL."""
        installation = self.get_installation_helper()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/repo",
                url="https://github.com/Test-Organization/repo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/repo"},
                integration_id=integration.id,
            )
        source_url = "https://github.com/Test-Organization/repo/blob/master/src/sentry/integrations/github/integration.py"
        assert (
            installation.extract_source_path_from_source_url(repo, source_url)
            == "src/sentry/integrations/github/integration.py"
        )
    @responses.activate
    def test_get_stacktrace_link_with_special_chars(self) -> None:
        """Test that URLs with special characters (like square brackets) are properly encoded"""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/foo"},
                integration_id=integration.id,
            )
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        filepath = "src/components/[id]/test.py"
        branch = "master"
        responses.add(
            responses.HEAD,
            f"{self.base_url}/repos/{repo.name}/contents/{filepath}?ref={branch}",
        )
        source_url = installation.get_stacktrace_link(repo, filepath, branch, branch)
        # [ and ] must be percent-encoded as %5B / %5D in the final URL
        assert (
            source_url
            == "https://github.com/Test-Organization/foo/blob/master/src/components/%5Bid%5D/test.py"
        )
    @responses.activate
    def test_get_stacktrace_link_avoid_double_quote(self) -> None:
        """Test that already percent-encoded characters are not double-encoded."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        with assume_test_silo_mode(SiloMode.REGION):
            repo = Repository.objects.create(
                organization_id=self.organization.id,
                name="Test-Organization/foo",
                url="https://github.com/Test-Organization/foo",
                provider="integrations:github",
                external_id=123,
                config={"name": "Test-Organization/foo"},
                integration_id=integration.id,
            )
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        # %20 is already an encoded space; it must not become %2520
        filepath = "src/components/test%20id/test.py"
        branch = "master"
        responses.add(
            responses.HEAD,
            f"{self.base_url}/repos/{repo.name}/contents/{filepath}?ref={branch}",
        )
        source_url = installation.get_stacktrace_link(repo, filepath, branch, branch)
        assert (
            source_url
            == "https://github.com/Test-Organization/foo/blob/master/src/components/test%20id/test.py"
        )
    @responses.activate
    def test_get_account_id(self) -> None:
        """The installation exposes the GitHub account id after setup."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        assert installation.get_account_id() == 60591805
    @responses.activate
    def test_get_account_id_backfill_missing(self) -> None:
        """get_account_id backfills a missing account_id into integration metadata."""
        self.assert_setup_flow()
        integration = Integration.objects.get(provider=self.provider.key)
        del integration.metadata["account_id"]
        integration.save()
        integration_id = integration.id
        # Checking that the account_id doesn't exist before we "backfill" it
        integration = Integration.objects.get(id=integration_id)
        assert integration.metadata.get("account_id") is None
        installation = get_installation_of_type(
            GitHubIntegration, integration, self.organization.id
        )
        assert installation.get_account_id() == 60591805
        # Re-fetch: the call above should have persisted the backfilled value
        integration = Integration.objects.get(id=integration_id)
        assert integration.metadata["account_id"] == 60591805
    @with_feature("organizations:integrations-scm-multi-org")
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    @patch.object(github_integration, "render_react_view", return_value=HttpResponse())
    def test_github_installation_calls_ui(
        self, mock_render: MagicMock, mock_record: MagicMock
    ) -> None:
        """With multi-org enabled, setup redirects through GitHub OAuth and then
        renders the installation-select React view with existing installations."""
        self._setup_with_existing_installations()
        installations = [
            {
                "installation_id": "1",
                "github_account": "santry",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pitchforks.png",
            },
            {
                "installation_id": "2",
                "github_account": "bufo-bot",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pog.png",
            },
            {
                "installation_id": "-1",
                "github_account": "Integrate with a new GitHub organization",
                "avatar_url": "",
            },
        ]
        resp = self.client.get(self.init_path)
        assert resp.status_code == 302
        redirect = urlparse(resp["Location"])
        assert redirect.scheme == "https"
        assert redirect.netloc == "github.com"
        assert redirect.path == "/login/oauth/authorize"
        assert (
            redirect.query
            == f"client_id=github-client-id&state={self.pipeline.signature}&redirect_uri=http://testserver/extensions/github/setup/"
        )
        resp = self.client.get(
            "{}?{}".format(
                self.setup_path,
                urlencode({"code": "12345678901234567890", "state": self.pipeline.signature}),
            )
        )
        serialized_organization = organization_service.serialize_organization(
            id=self.organization.id, as_user=serialize_rpc_user(self.user)
        )
        mock_render.assert_called_with(
            request=ANY,
            pipeline_name="githubInstallationSelect",
            props={
                "installation_info": installations,
                "has_scm_multi_org": True,
                "organization_slug": self.organization.slug,
                "organization": serialized_organization,
            },
        )
        # SLO assertions
        assert_success_metric(mock_record)
    @with_feature("organizations:integrations-scm-multi-org")
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_github_installation_stores_chosen_installation(self, mock_record: MagicMock) -> None:
        """Choosing an existing installation in the multi-org flow creates the
        OrganizationIntegration and authenticates with an app JWT."""
        self._setup_with_existing_installations()
        chosen_installation_id = "1"
        self.create_integration(
            organization=self.organization,
            provider="github",
            external_id=chosen_installation_id,
            name="santry",
        )
        responses.add(
            responses.GET,
            f"{self.base_url}/app/installations/{chosen_installation_id}",
            json={
                "id": chosen_installation_id,
                "app_id": self.app_id,
                "account": {
                    "id": chosen_installation_id,
                    "login": "poggers-org",
                    "avatar_url": "http://example.com/bufo-pog.png",
                    "html_url": "https://github.com/pog-organization",
                    "type": "Organization",
                },
            },
        )
        self._setup_select_github_organization()
        resp = self._setup_choose_installation(chosen_installation_id)
        assert resp.status_code == 200
        # NOTE: calls[4] indexes into the ordered list of mocked HTTP calls made
        # during the flow; adding/removing mocked requests will shift this index.
        auth_header = responses.calls[4].request.headers["Authorization"]
        assert auth_header == "Bearer jwt_token_1"
        assert (
            responses.calls[4].request.url
            == f"https://api.github.com/app/installations/{chosen_installation_id}"
        )
        self.assertDialogSuccess(resp)
        # Affirm OrganizationIntegration is created
        integration = Integration.objects.get(external_id=chosen_installation_id)
        assert integration.provider == "github"
        assert OrganizationIntegration.objects.filter(
            organization_id=self.organization.id, integration=integration
        ).exists()
        # SLO assertions
        assert_success_metric(mock_record)
    @with_feature("organizations:integrations-scm-multi-org")
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_github_installation_fails_on_invalid_installation(
        self, mock_record: MagicMock
    ) -> None:
        """Submitting an installation id the user does not own fails the flow."""
        self._setup_with_existing_installations()
        self._setup_select_github_organization()
        # We rendered the GithubOrganizationSelection UI and
        # the attacker modified the installation id with their own
        resp = self._setup_choose_installation("98765")
        self.assertTemplateUsed(resp, "sentry/integrations/github-integration-failed.html")
        assert (
            b'{"success":false,"data":{"error":"User does not have access to given installation."}'
            in resp.content
        )
        assert (
            b"Your GitHub account does not have owner privileges for the chosen organization."
            in resp.content
        )
        assert b'window.opener.postMessage({"success":false' in resp.content
        # SLO assertions
        # OAuth_login (success): redirect to log into GH ->
        # organization_select (success): redirect to UI selection page->
        # OAuth_login (success): we returned the UI redirect so work back up callstack ->
        # organization_select (failure): given installation_id was invalid
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=4
        )
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=3
        )
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
        )
        assert_failure_metric(mock_record, GitHubInstallationError.INVALID_INSTALLATION)
    @with_feature({"organizations:integrations-scm-multi-org": False})
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    @patch.object(github_integration, "render_react_view", return_value=HttpResponse())
    def test_github_installation_calls_ui_no_biz_plan(
        self, mock_render: MagicMock, mock_record: MagicMock
    ) -> None:
        """The selection UI still renders without the multi-org feature, but with
        has_scm_multi_org=False in its props."""
        self._setup_with_existing_installations()
        installations = [
            {
                "installation_id": "1",
                "github_account": "santry",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pitchforks.png",
            },
            {
                "installation_id": "2",
                "github_account": "bufo-bot",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pog.png",
            },
            {
                "installation_id": "-1",
                "github_account": "Integrate with a new GitHub organization",
                "avatar_url": "",
            },
        ]
        self._setup_select_github_organization()
        serialized_organization = organization_service.serialize_organization(
            id=self.organization.id, as_user=serialize_rpc_user(self.user)
        )
        mock_render.assert_called_with(
            request=ANY,
            pipeline_name="githubInstallationSelect",
            props={
                "installation_info": installations,
                "has_scm_multi_org": False,
                "organization_slug": self.organization.slug,
                "organization": serialized_organization,
            },
        )
        # SLO assertions
        assert_success_metric(mock_record)
    @with_feature({"organizations:integrations-scm-multi-org": False})
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    @patch.object(github_integration, "render_react_view", return_value=HttpResponse())
    def test_errors_when_invalid_access_to_multi_org(
        self, mock_render: MagicMock, mock_record: MagicMock
    ) -> None:
        """Choosing an existing installation fails when the org lacks the
        multi-org feature, with a FEATURE_NOT_AVAILABLE failure metric."""
        self._setup_with_existing_installations()
        installations = [
            {
                "installation_id": "1",
                "github_account": "santry",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pitchforks.png",
            },
            {
                "installation_id": "2",
                "github_account": "bufo-bot",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pog.png",
            },
            {
                "installation_id": "-1",
                "github_account": "Integrate with a new GitHub organization",
                "avatar_url": "",
            },
        ]
        resp = self._setup_select_github_organization()
        serialized_organization = organization_service.serialize_organization(
            id=self.organization.id, as_user=serialize_rpc_user(self.user)
        )
        mock_render.assert_called_with(
            request=ANY,
            pipeline_name="githubInstallationSelect",
            props={
                "installation_info": installations,
                "has_scm_multi_org": False,
                "organization_slug": self.organization.slug,
                "organization": serialized_organization,
            },
        )
        # We rendered the GithubOrganizationSelection UI and the user chose to skip
        resp = self._setup_choose_installation("12345")
        self.assertTemplateUsed(resp, "sentry/integrations/github-integration-failed.html")
        assert (
            b'{"success":false,"data":{"error":"Your organization does not have access to this feature."}}'
            in resp.content
        )
        assert b'window.opener.postMessage({"success":false' in resp.content
        assert_failure_metric(mock_record, GitHubInstallationError.FEATURE_NOT_AVAILABLE)
    @with_feature("organizations:integrations-scm-multi-org")
    @responses.activate
    @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    def test_github_installation_skips_chosen_installation(self, mock_record: MagicMock) -> None:
        """The plain setup flow (installing a new app) still succeeds when the
        multi-org selection step is skipped."""
        self.assert_setup_flow()
        # Affirm OrganizationIntegration is created
        integration = Integration.objects.get(external_id=self.installation_id)
        assert integration.provider == "github"
        assert OrganizationIntegration.objects.filter(
            organization_id=self.organization.id, integration=integration
        ).exists()
        # SLO assertions
        assert_success_metric(mock_record)
    @with_feature("organizations:integrations-scm-multi-org")
    @responses.activate
    def test_github_installation_gets_owner_orgs(self) -> None:
        """Only GitHub orgs where the user is an owner are returned."""
        self._setup_with_existing_installations()
        pipeline_view = OAuthLoginView()
        pipeline_view.client = GithubSetupApiClient(self.access_token)
        owner_orgs = pipeline_view._get_owner_github_organizations()
        assert owner_orgs == ["santry"]
    @with_feature("organizations:integrations-scm-multi-org")
    @responses.activate
    def test_github_installation_filters_valid_installations(self) -> None:
        """Eligible multi-org installations are limited to owner orgs plus the
        user's personal installations."""
        self._setup_with_existing_installations()
        pipeline_view = OAuthLoginView()
        pipeline_view.client = GithubSetupApiClient(self.access_token)
        owner_orgs = pipeline_view._get_owner_github_organizations()
        assert owner_orgs == ["santry"]
        installation_info = pipeline_view._get_eligible_multi_org_installations(
            owner_orgs=owner_orgs
        )
        assert installation_info == [
            {
                "installation_id": "1",
                "github_account": "santry",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pitchforks.png",
            },
            {
                "installation_id": "2",
                "github_account": "bufo-bot",
                "avatar_url": "https://github.com/knobiknows/all-the-bufo/raw/main/all-the-bufo/bufo-pog.png",
            },
        ]
@with_feature("organizations:integrations-scm-multi-org")
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_github_installation_validates_installing_organization(
    self, mock_record: MagicMock
) -> None:
    self._setup_with_existing_installations()
    chosen_installation_id = "1"
    self._setup_select_github_organization()

    # Switch the session to a freshly created organization mid-flow.
    self.create_organization(name="new-org", owner=self.user)
    self.login_as(self.user)

    # Finishing the installation from the new org must be rejected.
    resp = self._setup_choose_installation(chosen_installation_id)
    self.assertTemplateUsed(resp, "sentry/integrations/github-integration-failed.html")
    assert (
        b'{"success":false,"data":{"error":"Your organization does not have access to this feature."}}'
        in resp.content
    )
    assert b'window.opener.postMessage({"success":false' in resp.content
    assert_failure_metric(mock_record, GitHubInstallationError.FEATURE_NOT_AVAILABLE)
@responses.activate
@with_feature("organizations:integrations-github-project-management")
def test_get_organization_config(self) -> None:
    self.assert_setup_flow()
    integration = Integration.objects.get(provider=self.provider.key)
    installation = get_installation_of_type(
        GitHubIntegration, integration, self.organization.id
    )

    # The config form should expose exactly these fields, in this order.
    expected_names = [
        "sync_status_forward",
        "sync_status_reverse",
        "sync_reverse_assignment",
        "sync_forward_assignment",
        "resolution_strategy",
        "sync_comments",
    ]
    actual_names = [field["name"] for field in installation.get_organization_config()]
    assert actual_names == expected_names
@responses.activate
def test_update_organization_config(self) -> None:
    self.assert_setup_flow()
    integration = Integration.objects.get(provider=self.provider.key)
    installation = get_installation_of_type(
        GitHubIntegration, integration, self.organization.id
    )
    org_integration = OrganizationIntegration.objects.get(
        integration=integration, organization_id=self.organization.id
    )

    # A fresh installation starts with an empty config.
    assert org_integration.config == {}

    installation.update_organization_config(
        {"sync_reverse_assignment": True, "other_option": "test_value"}
    )

    # The persisted config now contains both submitted keys.
    org_integration.refresh_from_db()
    assert org_integration.config["sync_reverse_assignment"] is True
    assert org_integration.config["other_option"] == "test_value"
@responses.activate
def test_update_organization_config_preserves_existing(self) -> None:
    self.assert_setup_flow()
    integration = Integration.objects.get(provider=self.provider.key)
    installation = get_installation_of_type(
        GitHubIntegration, integration, self.organization.id
    )
    org_integration = OrganizationIntegration.objects.get(
        integration=integration, organization_id=self.organization.id
    )

    # Seed the config with pre-existing values before updating.
    org_integration.config = {
        "existing_key": "existing_value",
        "sync_reverse_assignment": False,
    }
    org_integration.save()

    installation.update_organization_config(
        {"sync_reverse_assignment": True, "new_key": "new_value"}
    )
    org_integration.refresh_from_db()

    # Updates merge: untouched keys survive, submitted keys are applied.
    assert org_integration.config["existing_key"] == "existing_value"
    assert org_integration.config["sync_reverse_assignment"] is True
    assert org_integration.config["new_key"] == "new_value"
@responses.activate
def test_update_organization_config_no_org_integration(self) -> None:
    """update_organization_config is a no-op when no OrganizationIntegration exists."""
    # Create an integration WITHOUT a linked organization integration.
    integration = self.create_provider_integration(
        provider="github",
        external_id="test_external_id",
        metadata={
            "access_token": self.access_token,
            "expires_at": self.expires_at[:-1],
            "icon": "http://example.com/avatar.png",
            "domain_name": "github.com/Test-Organization",
            "account_type": "Organization",
        },
    )
    installation = get_installation_of_type(
        GitHubIntegration, integration, self.organization.id
    )

    # The implementation returns early when org_integration is missing, so
    # this call must not raise. (The previous version wrapped this in a
    # `try/except Exception: pass`, which silently swallowed any failure and
    # made the test pass even if the method crashed — an unhandled exception
    # here should fail the test.)
    installation.update_organization_config({"sync_reverse_assignment": True})

    # Still no OrganizationIntegration should exist afterwards.
    assert not OrganizationIntegration.objects.filter(
        integration=integration, organization_id=self.organization.id
    ).exists()
@responses.activate
def test_sync_assignee_outbound(self) -> None:
    """Test assigning a GitHub issue to a user with linked GitHub account"""
    user, installation, external_issue, _, _ = self._setup_assignee_sync_test()

    # GitHub accepts the assignee update on the issue endpoint.
    responses.add(
        responses.PATCH,
        "https://api.github.com/repos/Test-Organization/foo/issues/123",
        json={"assignees": ["octocat"]},
        status=200,
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, user, assign=True)

    # Exactly one PATCH carrying the linked GitHub login should be sent.
    assert len(responses.calls) == 1
    sent = responses.calls[0].request
    assert sent.url == "https://api.github.com/repos/Test-Organization/foo/issues/123"
    assert orjson.loads(sent.body) == {"assignees": ["octocat"]}
@responses.activate
def test_sync_assignee_outbound_case_insensitive(self) -> None:
    """Test assigning a GitHub issue to a user with linked GitHub account"""
    user, installation, external_issue, _, _ = self._setup_assignee_sync_test(
        external_name="@JohnDoe"
    )
    responses.add(
        responses.PATCH,
        "https://api.github.com/repos/Test-Organization/foo/issues/123",
        json={"assignees": ["johndoe"]},
        status=200,
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, user, assign=True)

    # The mixed-case "@JohnDoe" external name is sent as lowercase "johndoe".
    assert len(responses.calls) == 1
    sent = responses.calls[0].request
    assert sent.url == "https://api.github.com/repos/Test-Organization/foo/issues/123"
    assert orjson.loads(sent.body) == {"assignees": ["johndoe"]}
@responses.activate
def test_sync_assignee_outbound_unassign(self) -> None:
    """Test unassigning a GitHub issue"""
    user, installation, external_issue, _, _ = self._setup_assignee_sync_test()
    responses.add(
        responses.PATCH,
        "https://api.github.com/repos/Test-Organization/foo/issues/123",
        json={"assignees": []},
        status=200,
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, user, assign=False)

    # assign=False clears the assignee list via a single PATCH.
    assert len(responses.calls) == 1
    sent = responses.calls[0].request
    assert sent.url == "https://api.github.com/repos/Test-Organization/foo/issues/123"
    assert orjson.loads(sent.body) == {"assignees": []}
@responses.activate
def test_sync_assignee_outbound_no_external_actor(self) -> None:
    """Test that sync fails gracefully when user has no GitHub account linked"""
    user, installation, external_issue, _, _ = self._setup_assignee_sync_test(
        create_external_user=False
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, user, assign=True)

    # Without a linked GitHub identity, no API request is issued.
    assert len(responses.calls) == 0
@responses.activate
def test_sync_assignee_outbound_invalid_key_format(self) -> None:
    """Test that sync handles invalid external issue key format gracefully"""
    user, installation, external_issue, _, _ = self._setup_assignee_sync_test(
        issue_key="invalid-key-format"
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, user, assign=True)

    # A key that is not "owner/repo#number" results in no API traffic.
    assert len(responses.calls) == 0
@responses.activate
def test_sync_assignee_outbound_strips_at_symbol(self) -> None:
    """Test that @ symbol is stripped from external_name when syncing"""
    user, installation, external_issue, _, _ = self._setup_assignee_sync_test()
    responses.add(
        responses.PATCH,
        "https://api.github.com/repos/Test-Organization/foo/issues/123",
        json={"assignees": ["octocat"]},
        status=200,
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, user, assign=True)

    # The request body carries the bare login, without the leading "@".
    assert len(responses.calls) == 1
    sent = responses.calls[0].request
    assert orjson.loads(sent.body) == {"assignees": ["octocat"]}
@responses.activate
def test_sync_assignee_outbound_with_none_user(self) -> None:
    """Test that assigning with no user does not make an API call"""
    self.assert_setup_flow()
    integration = Integration.objects.get(provider=self.provider.key)
    integration.metadata.update(
        {
            "access_token": self.access_token,
            "expires_at": self.expires_at,
        }
    )
    integration.save()
    installation = get_installation_of_type(
        GitHubIntegration, integration, self.organization.id
    )

    group = self.create_group()
    external_issue = self.create_integration_external_issue(
        group=group,
        integration=integration,
        key="Test-Organization/foo#123",
    )

    responses.calls.reset()
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_assignee_outbound(external_issue, None, assign=True)

    # assign=True with user=None is a no-op: nothing is sent to GitHub.
    assert len(responses.calls) == 0
@responses.activate
@with_feature("organizations:integrations-github-outbound-status-sync")
def test_sync_status_outbound_resolved(self) -> None:
    """Test syncing resolved status to GitHub (close issue)."""
    installation = self.integration.get_installation(self.organization.id)
    external_issue = self.create_integration_external_issue(
        group=self.group,
        integration=self.integration,
        key="Test-Organization/foo#123",
    )
    self.create_integration_external_project(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        external_id="Test-Organization/foo",
        resolved_status="closed",
        unresolved_status="open",
    )

    issue_url = "https://api.github.com/repos/Test-Organization/foo/issues/123"
    # The issue is currently open on GitHub...
    responses.add(responses.GET, issue_url, json={"state": "open", "number": 123})
    # ...so the sync should close it.
    responses.add(responses.PATCH, issue_url, json={"state": "closed", "number": 123})

    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_status_outbound(
            external_issue, is_resolved=True, project_id=self.project.id
        )

    # One read plus one write; the write moves the issue to "closed".
    assert len(responses.calls) == 2
    patch_call = responses.calls[1].request
    assert patch_call.method == "PATCH"
    assert orjson.loads(patch_call.body) == {"state": "closed"}
@responses.activate
@with_feature("organizations:integrations-github-outbound-status-sync")
def test_sync_status_outbound_unresolved(self) -> None:
    """Test syncing unresolved status to GitHub (reopen issue)."""
    installation = self.integration.get_installation(self.organization.id)
    external_issue = self.create_integration_external_issue(
        group=self.group,
        integration=self.integration,
        key="Test-Organization/foo#123",
    )
    self.create_integration_external_project(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        external_id="Test-Organization/foo",
        resolved_status="closed",
        unresolved_status="open",
    )

    issue_url = "https://api.github.com/repos/Test-Organization/foo/issues/123"
    # The issue is currently closed on GitHub...
    responses.add(responses.GET, issue_url, json={"state": "closed", "number": 123})
    # ...so the sync should reopen it.
    responses.add(responses.PATCH, issue_url, json={"state": "open", "number": 123})

    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_status_outbound(
            external_issue, is_resolved=False, project_id=self.project.id
        )

    # One read plus one write; the write moves the issue back to "open".
    assert len(responses.calls) == 2
    patch_call = responses.calls[1].request
    assert patch_call.method == "PATCH"
    assert orjson.loads(patch_call.body) == {"state": "open"}
@responses.activate
@with_feature("organizations:integrations-github-outbound-status-sync")
def test_sync_status_outbound_unchanged(self) -> None:
    """Test that no update is made when status is already in desired state."""
    installation = self.integration.get_installation(self.organization.id)
    external_issue = self.create_integration_external_issue(
        group=self.group,
        integration=self.integration,
        key="Test-Organization/foo#123",
    )
    self.create_integration_external_project(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        external_id="Test-Organization/foo",
        resolved_status="closed",
        unresolved_status="open",
    )

    # GitHub already reports the issue closed, i.e. already resolved.
    responses.add(
        responses.GET,
        "https://api.github.com/repos/Test-Organization/foo/issues/123",
        json={"state": "closed", "number": 123},
    )

    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_status_outbound(
            external_issue, is_resolved=True, project_id=self.project.id
        )

    # Only the read happened — no PATCH was necessary.
    assert len(responses.calls) == 1
    assert responses.calls[0].request.method == "GET"
@with_feature("organizations:integrations-github-outbound-status-sync")
def test_sync_status_outbound_no_external_project(self) -> None:
    """Test that sync_status_outbound returns early if no external project mapping exists."""
    installation = self.integration.get_installation(self.organization.id)

    # External issue exists, but no external project mapping does.
    with assume_test_silo_mode(SiloMode.REGION):
        external_issue = self.create_integration_external_issue(
            group=self.group,
            integration=self.integration,
            key="Test-Organization/foo#123",
        )

    # No HTTP mocks are registered: the call must return early without
    # raising instead of attempting any GitHub request.
    with assume_test_silo_mode(SiloMode.REGION):
        installation.sync_status_outbound(
            external_issue, is_resolved=True, project_id=self.project.id
        )
@responses.activate
@with_feature("organizations:integrations-github-outbound-status-sync")
def test_sync_status_outbound_api_error_on_get(self) -> None:
    """Test that API errors on get_issue are handled properly."""
    from sentry.shared_integrations.exceptions import IntegrationError

    installation = self.integration.get_installation(self.organization.id)
    external_issue = self.create_integration_external_issue(
        group=self.group,
        integration=self.integration,
        key="Test-Organization/foo#123",
    )
    self.create_integration_external_project(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        external_id="Test-Organization/foo",
        resolved_status="closed",
        unresolved_status="open",
    )

    # The initial read of the issue 404s.
    responses.add(
        responses.GET,
        "https://api.github.com/repos/Test-Organization/foo/issues/123",
        json={"message": "Not Found"},
        status=404,
    )

    # The failure must surface to the caller as an IntegrationError.
    with assume_test_silo_mode(SiloMode.REGION):
        with pytest.raises(IntegrationError):
            installation.sync_status_outbound(
                external_issue, is_resolved=True, project_id=self.project.id
            )
@responses.activate
@with_feature("organizations:integrations-github-outbound-status-sync")
def test_sync_status_outbound_api_error_on_update(self) -> None:
    """Test that API errors on update_issue are handled properly."""
    from sentry.shared_integrations.exceptions import IntegrationError

    installation = self.integration.get_installation(self.organization.id)
    external_issue = self.create_integration_external_issue(
        group=self.group,
        integration=self.integration,
        key="Test-Organization/foo#123",
    )
    self.create_integration_external_project(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        external_id="Test-Organization/foo",
        resolved_status="closed",
        unresolved_status="open",
    )

    issue_url = "https://api.github.com/repos/Test-Organization/foo/issues/123"
    # The read succeeds (issue is open)...
    responses.add(responses.GET, issue_url, json={"state": "open", "number": 123})
    # ...but the write is rejected by GitHub.
    responses.add(
        responses.PATCH,
        issue_url,
        json={"message": "Issues are disabled for this repo"},
        status=410,
    )

    # The write failure must surface to the caller as an IntegrationError.
    with assume_test_silo_mode(SiloMode.REGION):
        with pytest.raises(IntegrationError):
            installation.sync_status_outbound(
                external_issue, is_resolved=True, project_id=self.project.id
            )
def test_create_comment(self) -> None:
    self.user.name = "Sentry Admin"
    self.user.save()
    installation = self.integration.get_installation(self.organization.id)

    note = mock.Mock()
    note.data = {"text": "hello world\nThis is a comment.\n\n\n Glad it's quoted"}

    with mock.patch.object(GitHubApiClient, "create_comment") as mock_create_comment:
        installation.create_comment("Test-Organization/foo#123", self.user.id, note)

    # The issue number is parsed from the key, and the note body is quoted
    # line-by-line with the author's name prepended.
    call_positional_args = mock_create_comment.call_args[0]
    assert call_positional_args[1] == "123"
    assert call_positional_args[2] == {
        "body": "**Sentry Admin** wrote:\n\n> hello world\n> This is a comment.\n> \n> \n> Glad it's quoted"
    }
def test_update_comment(self) -> None:
    installation = self.integration.get_installation(self.organization.id)

    note = mock.Mock()
    note.data = {
        "text": "hello world\nThis is a comment.\n\n\n I've changed it",
        "external_id": "123",
    }

    with mock.patch.object(GitHubApiClient, "update_comment") as mock_update_comment:
        installation.update_comment("Test-Organization/foo#123", self.user.id, note)

    # Expected positional args: (repo, issue_id, comment_id, payload).
    assert mock_update_comment.call_args[0] == (
        "Test-Organization/foo",
        "123",
        "123",
        {
            "body": "**** wrote:\n\n> hello world\n> This is a comment.\n> \n> \n> I've changed it"
        },
    )
| GitHubIntegrationTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.