language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py | {
"start": 13863,
"end": 15101
} | class ____(PreTrainedModel):
config: VitPoseBackboneConfig
base_model_prefix = "vit"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["VitPoseBackboneEmbeddings", "VitPoseBackboneLayer"]
_supports_sdpa = True
_supports_flash_attn = True
_can_record_outputs = {
"attentions": VitPoseBackboneSelfAttention,
}
@torch.no_grad()
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm, VitPoseBackboneEmbeddings]):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, VitPoseBackboneEmbeddings):
init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
@auto_docstring(
custom_intro="""
The VitPose backbone useful for downstream tasks.
"""
)
| VitPoseBackbonePreTrainedModel |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/cached_property.py | {
"start": 40,
"end": 219
} | class ____:
@cached_property
def prop(self) -> int:
return 1
@cached_property
def prop_with_type_comment(self):
# type: () -> int
return 1
| Foo |
python | getsentry__sentry | tests/sentry/integrations/github/test_installation.py | {
"start": 499,
"end": 2591
} | class ____(APITestCase):
base_url = "https://api.github.com"
def setUp(self) -> None:
self.login_as(self.user)
self.url = "/extensions/github/webhook/"
self.secret = "b3002c3e321d4b7880360d397db2ccfd"
options.set("github-app.webhook-secret", self.secret)
@responses.activate
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
def test_installation_endpoint(self, get_jwt: MagicMock) -> None:
# add installation via GitHub webhook
responses.add(
method=responses.GET,
url="https://api.github.com/app/installations/2",
body=INSTALLATION_API_RESPONSE,
status=200,
content_type="application/json",
)
response = self.client.post(
path=self.url,
data=INSTALLATION_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="installation",
HTTP_X_HUB_SIGNATURE="sha1=348e46312df2901e8cb945616ee84ce30d9987c9",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
# check for endpoint response
installation_url = reverse("sentry-integration-github-installation", args=[2])
response = self.client.get(installation_url)
assert response.status_code == 200
body = response.json()
assert body["account"]["login"] == "octocat"
assert body["account"]["type"] == "User"
assert body["sender"]["id"] == 1
assert body["sender"]["login"] == "octocat"
# data should be hidden after exposure window
with freeze_time(before_now(seconds=-INSTALLATION_EXPOSURE_MAX_TIME - 10)):
response = self.client.get(installation_url)
assert response.status_code == 404
def test_no_installation(self) -> None:
installation_url = reverse("sentry-integration-github-installation", args=[888])
response = self.client.get(installation_url)
assert response.status_code == 404
| InstallationEndpointTest |
python | sqlalchemy__sqlalchemy | test/ext/test_baked.py | {
"start": 28460,
"end": 33026
} | class ____(testing.AssertsCompiledSQL, BakedTest):
run_setup_mappers = "each"
def _o2m_fixture(self, lazy="select", **kw):
User = self.classes.User
Address = self.classes.Address
self.mapper_registry.map_imperatively(
User,
self.tables.users,
properties={
"addresses": relationship(
Address,
order_by=self.tables.addresses.c.id,
lazy=lazy,
**kw,
)
},
)
self.mapper_registry.map_imperatively(Address, self.tables.addresses)
return User, Address
def _query_fixture(self):
from sqlalchemy.orm.query import Query
class CachingQuery(Query):
cache = {}
def set_cache_key(self, key):
return self.execution_options(_cache_key=key)
def set_cache_key_for_path(self, path, key):
return self.execution_options(**{"_cache_key_%s" % path: key})
def get_value(cache_key, cache, createfunc):
if cache_key in cache:
return cache[cache_key]()
else:
cache[cache_key] = retval = createfunc().freeze()
return retval()
s1 = fixture_session(query_cls=CachingQuery)
@event.listens_for(s1, "do_orm_execute", retval=True)
def do_orm_execute(orm_context):
ckey = None
for opt in orm_context.user_defined_options:
ckey = opt.get_cache_key(orm_context)
if ckey:
break
else:
if "_cache_key" in orm_context.execution_options:
ckey = orm_context.execution_options["_cache_key"]
if ckey is not None:
return get_value(
ckey,
CachingQuery.cache,
orm_context.invoke_statement,
)
return s1
def _option_fixture(self):
from sqlalchemy.orm.interfaces import UserDefinedOption
class RelationshipCache(UserDefinedOption):
inherit_cache = True
propagate_to_loaders = True
def get_cache_key(self, orm_context):
if orm_context.loader_strategy_path:
return "user7_addresses"
else:
return None
return RelationshipCache()
def test_non_baked(self):
User, Address = self._o2m_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
q = sess.query(User).filter(User.id == 7).set_cache_key("user7")
eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
eq_(list(q.cache), ["user7"])
eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
def test_non_baked_tuples(self):
User, Address = self._o2m_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
q = sess.query(User).filter(User.id == 7).set_cache_key("user7")
eq_(
sess.execute(q.statement).all(),
[(User(id=7, addresses=[Address(id=1)]),)],
)
eq_(list(q.cache), ["user7"])
eq_(
sess.execute(q.statement).all(),
[(User(id=7, addresses=[Address(id=1)]),)],
)
def test_use_w_baked(self):
User, Address = self._o2m_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
base_bq = self.bakery(lambda s: s.query(User))
base_bq += lambda q: q.filter(User.id == 7)
base_bq += lambda q: q.set_cache_key("user7")
eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
eq_(list(q.cache), ["user7"])
eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
def test_plain_w_baked_lazyload(self):
User, Address = self._o2m_fixture()
opt = self._option_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
q = sess.query(User).filter(User.id == 7).options(opt)
u = q.first()
eq_(u.addresses, [Address(id=1)])
eq_(list(q.cache), ["user7_addresses"])
sess.close()
# ensure caching logic works after query has been baked
q.cache.clear()
u = q.first()
eq_(u.addresses, [Address(id=1)])
eq_(list(q.cache), ["user7_addresses"])
| CustomIntegrationTest |
python | doocs__leetcode | solution/1300-1399/1306.Jump Game III/Solution.py | {
"start": 0,
"end": 395
} | class ____:
def canReach(self, arr: List[int], start: int) -> bool:
q = deque([start])
while q:
i = q.popleft()
if arr[i] == 0:
return True
x = arr[i]
arr[i] = -1
for j in (i + x, i - x):
if 0 <= j < len(arr) and arr[j] >= 0:
q.append(j)
return False
| Solution |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 53829,
"end": 54274
} | class ____(WorkPoolCommandAction):
"""Resumes a Work Pool"""
type: Literal["resume-work-pool"] = "resume-work-pool"
_action_description: ClassVar[str] = "Resuming work pool"
async def command(
self,
orchestration: "OrchestrationClient",
work_pool: WorkPool,
triggered_action: "TriggeredAction",
) -> Response:
return await orchestration.resume_work_pool(work_pool.name)
| ResumeWorkPool |
python | kamyu104__LeetCode-Solutions | Python/alert-using-same-key-card-three-or-more-times-in-a-one-hour-period.py | {
"start": 71,
"end": 915
} | class ____(object):
def alertNames(self, keyName, keyTime):
"""
:type keyName: List[str]
:type keyTime: List[str]
:rtype: List[str]
"""
THRESHOLD = 3
name_to_times = collections.defaultdict(list)
for name, hour_minute in itertools.izip(keyName, keyTime):
hour, minute = map(int, hour_minute.split(':'))
name_to_times[name].append(hour*60 + minute)
names = []
for name, times in name_to_times.iteritems():
times.sort()
left = 0
for right, time in enumerate(times):
while time-times[left] > 60:
left += 1
if right-left+1 >= THRESHOLD:
names.append(name)
break
names.sort()
return names
| Solution |
python | pandas-dev__pandas | pandas/tests/series/test_api.py | {
"start": 230,
"end": 9452
} | class ____:
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
# Series of str values should have .str but not .dt/.cat in __dir__
assert "str" in dir(s)
assert "dt" not in dir(s)
assert "cat" not in dir(s)
def test_tab_completion_dt(self):
# similarly for .dt
s = Series(date_range("1/1/2015", periods=5))
assert "dt" in dir(s)
assert "str" not in dir(s)
assert "cat" not in dir(s)
def test_tab_completion_cat(self):
# Similarly for .cat, but with the twist that str and dt should be
# there if the categories are of that type first cat and str.
s = Series(list("abbcd"), dtype="category")
assert "cat" in dir(s)
assert "str" in dir(s) # as it is a string categorical
assert "dt" not in dir(s)
def test_tab_completion_cat_str(self):
# similar to cat and str
s = Series(date_range("1/1/2015", periods=5)).astype("category")
assert "cat" in dir(s)
assert "str" not in dir(s)
assert "dt" in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = [
"categories",
"codes",
"ordered",
"set_categories",
"add_categories",
"remove_categories",
"rename_categories",
"reorder_categories",
"remove_unused_categories",
"as_ordered",
"as_unordered",
]
s = Series(list("aabbcde")).astype("category")
results = sorted({r for r in s.cat.__dir__() if not r.startswith("_")})
tm.assert_almost_equal(results, sorted(set(ok_for_cat)))
@pytest.mark.parametrize(
"index",
[
Index(list("ab") * 5, dtype="category"),
Index([str(i) for i in range(10)]),
Index(["foo", "bar", "baz"] * 2),
date_range("2020-01-01", periods=10),
period_range("2020-01-01", periods=10, freq="D"),
timedelta_range("1 day", periods=10),
Index(np.arange(10), dtype=np.uint64),
Index(np.arange(10), dtype=np.int64),
Index(np.arange(10), dtype=np.float64),
Index([True, False]),
Index([f"a{i}" for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
],
)
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
s = Series(index=index, dtype=object)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
assert not isinstance(x, str) or not x.isidentifier() or x in dir_s
else:
assert x not in dir_s
@pytest.mark.parametrize("ser", [Series(dtype=object), Series([1])])
def test_not_hashable(self, ser):
msg = "unhashable type: 'Series'"
with pytest.raises(TypeError, match=msg):
hash(ser)
def test_contains(self, datetime_series):
tm.assert_contains_all(datetime_series.index, datetime_series)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
tm.assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index"))
assert s.dropna().sum(axis="rows") == 3
assert s._get_axis_number("rows") == 0
assert s._get_axis_name("rows") == "index"
def test_class_axis(self):
# https://github.com/pandas-dev/pandas/issues/18147
# no exception and no empty docstring
assert pydoc.getdoc(Series.index)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(
np.random.default_rng(2).standard_normal((1000, 3)),
columns=["A", "B", "C"],
index=date_range("1/1/2000", periods=1000),
)
def f(x):
return x[x.idxmax()]
result = tsdf.apply(f)
expected = tsdf.max()
tm.assert_series_equal(result, expected)
def test_ndarray_compat_like_func(self):
# using an ndarray like function
s = Series(np.random.default_rng(2).standard_normal(10))
result = Series(np.ones_like(s))
expected = Series(1, index=range(10), dtype="float64")
tm.assert_series_equal(result, expected)
def test_empty_method(self):
s_empty = Series(dtype=object)
assert s_empty.empty
@pytest.mark.parametrize("dtype", ["int64", object])
def test_empty_method_full_series(self, dtype):
full_series = Series(index=[1], dtype=dtype)
assert not full_series.empty
@pytest.mark.parametrize("dtype", [None, "Int64"])
def test_integer_series_size(self, dtype):
# GH 25580
s = Series(range(9), dtype=dtype)
assert s.size == 9
def test_attrs(self):
s = Series([0, 1], name="abc")
assert s.attrs == {}
s.attrs["version"] = 1
result = s + 1
assert result.attrs == {"version": 1}
def test_inspect_getmembers(self):
# GH38782
ser = Series(dtype=object)
inspect.getmembers(ser)
def test_unknown_attribute(self):
# GH#9680
tdi = timedelta_range(start=0, periods=10, freq="1s")
ser = Series(np.random.default_rng(2).normal(size=10), index=tdi)
assert "foo" not in ser.__dict__
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
ser.foo
@pytest.mark.parametrize("op", ["year", "day", "second", "weekday"])
def test_datetime_series_no_datelike_attrs(self, op, datetime_series):
# GH#7206
msg = f"'Series' object has no attribute '{op}'"
with pytest.raises(AttributeError, match=msg):
getattr(datetime_series, op)
def test_series_datetimelike_attribute_access(self):
# attribute access should still work!
ser = Series({"year": 2000, "month": 1, "day": 10})
assert ser.year == 2000
assert ser.month == 1
assert ser.day == 10
def test_series_datetimelike_attribute_access_invalid(self):
ser = Series({"year": 2000, "month": 1, "day": 10})
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
ser.weekday
@pytest.mark.parametrize(
"kernel, has_numeric_only",
[
("skew", True),
("var", True),
("all", False),
("prod", True),
("any", False),
("idxmin", False),
("quantile", False),
("idxmax", False),
("min", True),
("sem", True),
("mean", True),
("nunique", False),
("max", True),
("sum", True),
("count", False),
("median", True),
("std", True),
("rank", True),
("pct_change", False),
("cummax", False),
("shift", False),
("diff", False),
("cumsum", False),
("cummin", False),
("cumprod", False),
("fillna", False),
("ffill", False),
("bfill", False),
("sample", False),
("tail", False),
("take", False),
("head", False),
("cov", False),
("corr", False),
],
)
@pytest.mark.parametrize("dtype", [bool, int, float, object])
def test_numeric_only(self, kernel, has_numeric_only, dtype):
# GH#47500
ser = Series([0, 1, 1], dtype=dtype)
if kernel == "corrwith":
args = (ser,)
elif kernel == "corr":
args = (ser,)
elif kernel == "cov":
args = (ser,)
elif kernel == "nth":
args = (0,)
elif kernel == "fillna":
args = (True,)
elif kernel == "fillna":
args = ("ffill",)
elif kernel == "take":
args = ([0],)
elif kernel == "quantile":
args = (0.5,)
else:
args = ()
method = getattr(ser, kernel)
if not has_numeric_only:
msg = (
"(got an unexpected keyword argument 'numeric_only'"
"|too many arguments passed in)"
)
with pytest.raises(TypeError, match=msg):
method(*args, numeric_only=True)
elif dtype is object:
msg = f"Series.{kernel} does not allow numeric_only=True with non-numeric"
with pytest.raises(TypeError, match=msg):
method(*args, numeric_only=True)
else:
result = method(*args, numeric_only=True)
expected = method(*args, numeric_only=False)
if isinstance(expected, Series):
# transformer
tm.assert_series_equal(result, expected)
else:
# reducer
assert result == expected
| TestSeriesMisc |
python | PrefectHQ__prefect | tests/cli/test_work_pool.py | {
"start": 20354,
"end": 21228
} | class ____:
async def test_ls(self, prefect_client, work_pool):
res = await run_sync_in_worker_thread(
invoke_and_assert,
"work-pool ls",
)
assert res.exit_code == 0
async def test_verbose(self, prefect_client, work_pool):
res = await run_sync_in_worker_thread(
invoke_and_assert,
"work-pool ls --verbose",
)
assert res.exit_code == 0
async def test_ls_with_zero_concurrency_limit(self, prefect_client, work_pool):
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool set-concurrency-limit {work_pool.name} 0",
)
assert res.exit_code == 0
res = await run_sync_in_worker_thread(
invoke_and_assert,
"work-pool ls",
)
assert "None" not in res.output
| TestLS |
python | doocs__leetcode | solution/0000-0099/0057.Insert Interval/Solution2.py | {
"start": 0,
"end": 604
} | class ____:
def insert(
self, intervals: List[List[int]], newInterval: List[int]
) -> List[List[int]]:
st, ed = newInterval
ans = []
insert = False
for s, e in intervals:
if ed < s:
if not insert:
ans.append([st, ed])
insert = True
ans.append([s, e])
elif e < st:
ans.append([s, e])
else:
st = min(st, s)
ed = max(ed, e)
if not insert:
ans.append([st, ed])
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-mysql/dagster_mysql_tests/test_daemon_cursor_storage.py | {
"start": 258,
"end": 551
} | class ____(TestDaemonCursorStorage):
__test__ = True
@pytest.fixture(scope="function", name="storage")
def cursor_storage(self, conn_string):
storage = MySQLRunStorage.create_clean_storage(conn_string)
assert storage
return storage
| TestMySQLDaemonCursorStorage |
python | getsentry__sentry | src/sentry/auth/providers/dummy.py | {
"start": 532,
"end": 1120
} | class ____(AuthView):
def dispatch(self, request: HttpRequest, pipeline: AuthHelper) -> HttpResponseBase:
if "email" in request.POST:
if "id" in request.POST:
pipeline.bind_state("id", request.POST.get("id"))
pipeline.bind_state("email", request.POST.get("email"))
pipeline.bind_state("legacy_email", request.POST.get("legacy_email"))
pipeline.bind_state("email_verified", bool(request.POST.get("email_verified")))
return pipeline.next_step()
return HttpResponse(PLACEHOLDER_TEMPLATE)
| AskEmail |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/cfg_test.py | {
"start": 878,
"end": 1203
} | class ____(cfg.GraphVisitor):
def __init__(self, graph):
super(CountingVisitor, self).__init__(graph)
self.counts = {}
def init_state(self, _):
return None
def visit_node(self, node):
self.counts[node.ast_node] = self.counts.get(node.ast_node, 0) + 1
return False # visit only once
| CountingVisitor |
python | ansible__ansible | test/units/module_utils/basic/test_argument_spec.py | {
"start": 11136,
"end": 17560
} | class ____:
"""Test with a more complex arg_spec"""
@pytest.mark.parametrize('stdin', [{'foo': 'hello'}, {'dup': 'hello'}], indirect=['stdin'])
def test_complex_required(self, stdin, complex_argspec):
"""Test that the complex argspec works if we give it its required param as either the canonical or aliased name"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['foo'], str)
assert am.params['foo'] == 'hello'
@pytest.mark.parametrize('stdin', [{'foo': 'hello1', 'dup': 'hello2'}], indirect=['stdin'])
def test_complex_duplicate_warning(self, stdin, complex_argspec):
"""Test that the complex argspec issues a warning if we specify an option both with its canonical name and its alias"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['foo'], str)
assert 'Both option foo and its alias dup are set.' in get_warning_messages()
assert am.params['foo'] == 'hello2'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'test'}], indirect=['stdin'])
def test_complex_type_fallback(self, mocker, stdin, complex_argspec):
"""Test that the complex argspec works if we get a required parameter via fallback"""
environ = os.environ.copy()
environ['BAZ'] = 'test data'
mocker.patch('ansible.module_utils.basic.os.environ', environ)
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['baz'], str)
assert am.params['baz'] == 'test data'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'bad', 'bam': 'bad2', 'bing': 'a', 'bang': 'b', 'bong': 'c'}], indirect=['stdin'])
def test_fail_mutually_exclusive(self, capfd, stdin, complex_argspec):
"""Fail because of mutually exclusive parameters"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are mutually exclusive: bar|bam, bing|bang|bong"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'bad2'}], indirect=['stdin'])
def test_fail_required_together(self, capfd, stdin, complex_argspec):
"""Fail because only one of a required_together pair of parameters was specified"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'hi'}], indirect=['stdin'])
def test_fail_required_together_and_default(self, capfd, stdin, complex_argspec):
"""Fail because one of a required_together pair of parameters has a default and the other was not specified"""
complex_argspec['argument_spec']['baz'] = {'default': 42}
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello'}], indirect=['stdin'])
def test_fail_required_together_and_fallback(self, capfd, mocker, stdin, complex_argspec):
"""Fail because one of a required_together pair of parameters has a fallback and the other was not specified"""
environ = os.environ.copy()
environ['BAZ'] = 'test data'
mocker.patch('ansible.module_utils.basic.os.environ', environ)
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'four', 'five']}], indirect=['stdin'])
def test_fail_list_with_choices(self, capfd, mocker, stdin, complex_argspec):
"""Fail because one of the items is not in the choice"""
with pytest.raises(SystemExit):
basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "value of zardoz2 must be one or more of: one, two, three. Got no match for: four, five"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'three']}], indirect=['stdin'])
def test_list_with_choices(self, capfd, mocker, stdin, complex_argspec):
"""Test choices with list"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['zardoz2'], list)
assert am.params['zardoz2'] == ['one', 'three']
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar3': ['~/test', 'test/']}], indirect=['stdin'])
def test_list_with_elements_path(self, capfd, mocker, stdin, complex_argspec):
"""Test choices with list"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['bar3'], list)
assert am.params['bar3'][0].startswith('/')
assert am.params['bar3'][1] == 'test/'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zodraz': 'one'}], indirect=['stdin'])
def test_deprecated_alias(self, capfd, mocker, stdin, complex_argspec):
"""Test a deprecated alias"""
am = basic.AnsibleModule(**complex_argspec)
assert "Alias 'zodraz' is deprecated." in get_deprecation_messages()[0]['msg']
assert get_deprecation_messages()[0]['version'] == '9.99'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar_str': [867, '5309']}], indirect=['stdin'])
def test_list_with_elements_callable_str(self, capfd, mocker, stdin, complex_argspec):
"""Test choices with list"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['bar_str'], list)
assert isinstance(am.params['bar_str'][0], str)
assert isinstance(am.params['bar_str'][1], str)
assert am.params['bar_str'][0] == '867'
assert am.params['bar_str'][1] == '5309'
| TestComplexArgSpecs |
python | urllib3__urllib3 | test/test_response.py | {
"start": 56196,
"end": 56368
} | class ____(MockChunkedEncodingResponse):
def _encode_chunk(self, chunk: bytes) -> bytes:
return f"9999\r\n{chunk.decode()}\r\n".encode()
| MockChunkedIncompleteRead |
python | cython__cython | tests/run/withstat_py27.py | {
"start": 2476,
"end": 2590
} | class ____(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
| EnterRaises |
python | davidhalter__jedi | jedi/api/errors.py | {
"start": 214,
"end": 1253
} | class ____:
"""
Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
"""
def __init__(self, parso_error):
self._parso_error = parso_error
@property
def line(self):
"""The line where the error starts (starting with 1)."""
return self._parso_error.start_pos[0]
@property
def column(self):
"""The column where the error starts (starting with 0)."""
return self._parso_error.start_pos[1]
@property
def until_line(self):
"""The line where the error ends (starting with 1)."""
return self._parso_error.end_pos[0]
@property
def until_column(self):
"""The column where the error ends (starting with 0)."""
return self._parso_error.end_pos[1]
def get_message(self):
return self._parso_error.message
def __repr__(self):
return '<%s from=%s to=%s>' % (
self.__class__.__name__,
self._parso_error.start_pos,
self._parso_error.end_pos,
)
| SyntaxError |
python | vyperlang__vyper | tests/evm_backends/base_env.py | {
"start": 1023,
"end": 8309
} | class ____:
"""
Base class for EVM backends.
It provides a common interface for deploying contracts and interacting with them.
"""
DEFAULT_CHAIN_ID = 1
def __init__(self, gas_limit: int, account_keys: list[PrivateKey]) -> None:
self.gas_limit = gas_limit
self._keys = account_keys
self.deployer: str = self._keys[0].public_key.to_checksum_address()
def deploy(self, abi: list[dict], bytecode: bytes, value=0, *args, **kwargs):
"""Deploy a contract with the given ABI and bytecode."""
factory = ABIContractFactory.from_abi_dict(abi, bytecode=bytecode)
initcode = bytecode
if args or kwargs:
ctor_abi = next(i for i in abi if i["type"] == "constructor")
ctor = ABIFunction(ctor_abi, contract_name=factory._name)
initcode += ctor.prepare_calldata(*args, **kwargs)
deployed_at = self._deploy(initcode, value)
address = to_checksum_address(deployed_at)
return factory.at(self, address)
def deploy_source(
self,
source_code: str,
output_formats: Iterable[str],
*args,
compiler_settings: Settings = None,
input_bundle: InputBundle = None,
value: int = 0,
storage_layout_override=None,
**kwargs,
) -> ABIContract:
"""Compile and deploy a contract from source code."""
abi, bytecode = _compile(
source_code, output_formats, input_bundle, compiler_settings, storage_layout_override
)
return self.deploy(abi, bytecode, value, *args, **kwargs)
def deploy_blueprint(
self,
source_code,
output_formats,
*args,
input_bundle: InputBundle = None,
initcode_prefix: bytes = ERC5202_PREFIX,
):
"""Deploy a contract with a blueprint pattern."""
abi, bytecode = _compile(source_code, output_formats, input_bundle)
bytecode = initcode_prefix + bytecode
bytecode_len = len(bytecode)
bytecode_len_hex = hex(bytecode_len)[2:].rjust(4, "0")
# prepend a quick deploy preamble
deploy_preamble = bytes.fromhex("61" + bytecode_len_hex + "3d81600a3d39f3")
deploy_bytecode = deploy_preamble + bytecode
deployer_abi: list[dict] = [] # just a constructor
deployer = self.deploy(deployer_abi, deploy_bytecode, *args)
def factory(address):
return ABIContractFactory.from_abi_dict(abi).at(self, address)
return deployer, factory
def get_logs(self, contract: ABIContract, event_name: str = None, raw=False):
logs = [log for log in self.last_result.logs if contract.address == log.address]
if raw:
return [log.data for log in logs]
parsed_logs = [contract.parse_log(log) for log in logs]
if event_name:
return [log for log in parsed_logs if log.event == event_name]
return parsed_logs
@property
def accounts(self) -> list[str]:
return [key.public_key.to_checksum_address() for key in self._keys]
@contextmanager
def anchor(self):
raise NotImplementedError # must be implemented by subclasses
def get_balance(self, address: str) -> int:
raise NotImplementedError # must be implemented by subclasses
def set_balance(self, address: str, value: int):
raise NotImplementedError # must be implemented by subclasses
@property
def block_number(self) -> int:
raise NotImplementedError # must be implemented by subclasses
@block_number.setter
def block_number(self, value: int):
raise NotImplementedError
@property
def timestamp(self) -> int | None:
raise NotImplementedError # must be implemented by subclasses
@timestamp.setter
def timestamp(self, value: int):
raise NotImplementedError # must be implemented by subclasses
@property
def last_result(self) -> ExecutionResult:
raise NotImplementedError # must be implemented by subclasses
@property
def blob_hashes(self) -> list[bytes]:
raise NotImplementedError # must be implemented by subclasses
@blob_hashes.setter
def blob_hashes(self, value: list[bytes]):
raise NotImplementedError # must be implemented by subclasses
def message_call(
self,
to: str,
sender: str | None = None,
data: bytes | str = b"",
value: int = 0,
gas: int | None = None,
gas_price: int = 0,
is_modifying: bool = True,
) -> bytes:
raise NotImplementedError # must be implemented by subclasses
def clear_transient_storage(self) -> None:
raise NotImplementedError # must be implemented by subclasses
def get_code(self, address: str) -> bytes:
raise NotImplementedError # must be implemented by subclasses
def get_excess_blob_gas(self) -> Optional[int]:
raise NotImplementedError # must be implemented by subclasses
def set_excess_blob_gas(self, param):
raise NotImplementedError # must be implemented by subclasses
def _deploy(self, code: bytes, value: int, gas: int | None = None) -> str:
raise NotImplementedError # must be implemented by subclasses
@staticmethod
def _parse_revert(output_bytes: bytes, error: Exception, gas_used: int):
"""
Tries to parse the EIP-838 revert reason from the output bytes.
"""
if output_bytes[:4] == method_id("Error(string)"):
(msg,) = abi_decode("(string)", output_bytes[4:])
raise ExecutionReverted(f"{msg}", gas_used) from error
raise ExecutionReverted(f"0x{output_bytes.hex()}", gas_used) from error
@property
def invalid_opcode_error(self) -> str:
"""Expected error message when invalid opcode is executed."""
raise NotImplementedError # must be implemented by subclasses
@property
def out_of_gas_error(self) -> str:
"""Expected error message when user runs out of gas"""
raise NotImplementedError # must be implemented by subclasses
@property
def contract_size_limit_error(self) -> str:
"""Expected error message when contract is over codesize limit"""
raise NotImplementedError # must be implemented by subclasses
@property
def initcode_size_limit_error(self) -> str:
"""Expected error message when contract is over codesize limit"""
raise NotImplementedError # must be implemented by subclasses
def _compile(
source_code: str,
output_formats: Iterable[str],
input_bundle: InputBundle = None,
settings: Settings = None,
storage_layout_override=None,
) -> tuple[list[dict], bytes]:
out = compile_code(
source_code,
# test that all output formats can get generated
output_formats=output_formats,
settings=settings,
input_bundle=input_bundle,
show_gas_estimates=True, # Enable gas estimates for testing
storage_layout_override=storage_layout_override,
)
parse_vyper_source(source_code) # Test grammar.
json.dumps(out["metadata"]) # test metadata is json serializable
return out["abi"], bytes.fromhex(out["bytecode"].removeprefix("0x"))
| BaseEnv |
python | kamyu104__LeetCode-Solutions | Python/minimum-common-value.py | {
"start": 44,
"end": 468
} | class ____(object):
def getCommon(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
i = j = 0
while i < len(nums1) and j < len(nums2):
if nums1[i] < nums2[j]:
i += 1
elif nums1[i] > nums2[j]:
j += 1
else:
return nums1[i]
return -1
| Solution |
python | getsentry__sentry | tests/sentry/utils/test_audit.py | {
"start": 942,
"end": 21383
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user(username=username)
self.req = fake_http_request(self.user)
self.org = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.org)
self.project = self.create_project(teams=[self.team], platform="java")
def assert_no_delete_log_created(self):
with assume_test_silo_mode(SiloMode.REGION):
assert not DeletedOrganization.objects.filter(slug=self.org.slug).exists()
assert not DeletedTeam.objects.filter(slug=self.team.slug).exists()
assert not DeletedProject.objects.filter(slug=self.project.slug).exists()
def test_audit_entry_api(self) -> None:
org = self.create_organization()
apikey = self.create_api_key(org, allowed_origins="*")
req = fake_http_request(AnonymousUser())
req.auth = apikey
entry = create_audit_entry(req, organization_id=org.id, data={"thing": "to True"})
assert entry.actor_key == apikey
assert entry.actor is None
assert entry.ip_address == req.META["REMOTE_ADDR"]
self.assert_no_delete_log_created()
def test_audit_entry_frontend(self) -> None:
org = self.create_organization()
req = fake_http_request(self.create_user())
entry = create_audit_entry(req, organization_id=org.id, data={"thing": "to True"})
assert entry.actor == req.user
assert entry.actor_key is None
assert entry.ip_address == req.META["REMOTE_ADDR"]
self.assert_no_delete_log_created()
def test_audit_entry_org_add_log(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=audit_log.get_event_id("ORG_ADD"),
data=self.org.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.actor_label is None
assert entry.target_object == self.org.id
assert entry.event == audit_log.get_event_id("ORG_ADD")
audit_log_event = audit_log.get(entry.event)
assert audit_log_event.render(entry) == "created the organization"
def test_audit_entry_org_add_log_with_channel(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=audit_log.get_event_id("ORG_ADD"),
data={
"channel": "vercel",
**self.org.get_audit_log_data(),
},
)
assert entry.actor == self.user
assert entry.actor_label is None
assert entry.target_object == self.org.id
assert entry.event == audit_log.get_event_id("ORG_ADD")
audit_log_event = audit_log.get(entry.event)
assert audit_log_event.render(entry) == "created the organization with vercel integration"
def test_audit_entry_org_delete_log(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=audit_log.get_event_id("ORG_REMOVE"),
data=self.org.get_audit_log_data(),
)
assert entry.actor == self.user
assert (
entry.actor_label is None
) # you won't get this back since it only expands on the control silo
assert entry.target_object == self.org.id
assert entry.event == audit_log.get_event_id("ORG_REMOVE")
deleted_org = DeletedOrganization.objects.get(slug=self.org.slug)
self.assert_valid_deleted_log(deleted_org, self.org)
def test_audit_entry_org_restore_log(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
Organization.objects.get(id=self.organization.id).update(
status=OrganizationStatus.PENDING_DELETION
)
org = Organization.objects.get(id=self.organization.id)
Organization.objects.get(id=self.organization.id).update(
status=OrganizationStatus.DELETION_IN_PROGRESS
)
org2 = Organization.objects.get(id=self.organization.id)
Organization.objects.get(id=self.organization.id).update(
status=OrganizationStatus.ACTIVE
)
org3 = Organization.objects.get(id=self.organization.id)
orgs = [org, org2, org3]
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=audit_log.get_event_id("ORG_RESTORE"),
data=self.org.get_audit_log_data(),
)
audit_log_event = audit_log.get(entry.event)
entry2 = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=audit_log.get_event_id("ORG_EDIT"),
data=self.org.get_audit_log_data(),
)
audit_log_event2 = audit_log.get(entry2.event)
for i in orgs:
if (
i.status == OrganizationStatus.PENDING_DELETION
or i.status == OrganizationStatus.DELETION_IN_PROGRESS
):
assert i.status != OrganizationStatus.ACTIVE
assert ("restored") in audit_log_event.render(entry)
assert entry.actor == self.user
assert entry.target_object == self.org.id
assert entry.event == audit_log.get_event_id("ORG_RESTORE")
else:
assert i.status == OrganizationStatus.ACTIVE
assert ("edited") in audit_log_event2.render(entry2)
assert entry2.actor == self.user
assert entry2.target_object == self.org.id
assert entry2.event == audit_log.get_event_id("ORG_EDIT")
def test_audit_entry_team_delete_log(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.team.id,
event=audit_log.get_event_id("TEAM_REMOVE"),
data=self.team.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.target_object == self.team.id
assert entry.event == audit_log.get_event_id("TEAM_REMOVE")
deleted_team = DeletedTeam.objects.get(slug=self.team.slug)
self.assert_valid_deleted_log(deleted_team, self.team)
def test_audit_entry_api_key(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
key = self.create_api_key(self.organization)
with outbox_runner():
create_audit_entry_from_user(
None,
api_key=key,
organization=self.organization,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_ADD"),
data={"thing": "to True"},
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert (
AuditLogEntry.objects.get(event=audit_log.get_event_id("PROJECT_ADD")).actor_label
== key.key
)
def test_audit_entry_project_delete_log(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_REMOVE"),
data=self.project.get_audit_log_data(),
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_REMOVE")
assert audit_log_event.render(entry) == "removed project" + " " + self.project.slug
deleted_project = DeletedProject.objects.get(slug=self.project.slug)
self.assert_valid_deleted_log(deleted_project, self.project)
assert deleted_project.platform == self.project.platform
def test_audit_entry_project_delete_with_origin_log(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_REMOVE_WITH_ORIGIN"),
data={**self.project.get_audit_log_data(), "origin": "settings"},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_REMOVE_WITH_ORIGIN")
assert (
audit_log_event.render(entry)
== "removed project" + " " + self.project.slug + " in settings"
)
deleted_project = DeletedProject.objects.get(slug=self.project.slug)
self.assert_valid_deleted_log(deleted_project, self.project)
assert deleted_project.platform == self.project.platform
def test_audit_entry_project_create_with_origin_log(self) -> None:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_ADD_WITH_ORIGIN"),
data={**self.project.get_audit_log_data(), "origin": "ui"},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_ADD_WITH_ORIGIN")
assert (
audit_log_event.render(entry) == "created project" + " " + self.project.slug + " via ui"
)
def test_audit_entry_project_edit_log(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_EDIT"),
data={"old_slug": "old", "new_slug": "new"},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_EDIT")
assert audit_log_event.render(entry) == "renamed project slug from old to new"
def test_audit_entry_project_edit_log_regression(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_EDIT"),
data={"new_slug": "new"},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_EDIT")
assert audit_log_event.render(entry) == "edited project settings in new_slug to new"
def test_audit_entry_project_performance_setting_disable_detection(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_PERFORMANCE_ISSUE_DETECTION_CHANGE"),
data={"file_io_on_main_thread_detection_enabled": False},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_PERFORMANCE_ISSUE_DETECTION_CHANGE")
assert (
audit_log_event.render(entry)
== "edited project performance issue detector settings to disable detection of File IO on Main Thread issue"
)
def test_audit_entry_project_performance_setting_enable_detection(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_PERFORMANCE_ISSUE_DETECTION_CHANGE"),
data={"file_io_on_main_thread_detection_enabled": True},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_PERFORMANCE_ISSUE_DETECTION_CHANGE")
assert (
audit_log_event.render(entry)
== "edited project performance issue detector settings to enable detection of File IO on Main Thread issue"
)
def test_audit_entry_project_ownership_rule_edit(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECT_OWNERSHIPRULE_EDIT"),
data={"thing": "to True"},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECT_OWNERSHIPRULE_EDIT")
assert audit_log_event.render(entry) == "modified ownership rules"
def test_audit_entry_project_key_edit(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECTKEY_EDIT"),
data={
"public_key": "KEY",
"rate_limit_count": 6,
"rate_limit_window": 60,
},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECTKEY_EDIT")
assert audit_log_event.render(entry) == "edited project key KEY"
def test_audit_entry_project_key_rate_limit_edit(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECTKEY_EDIT"),
data={
"public_key": "KEY",
"prev_rate_limit_count": None,
"prev_rate_limit_window": None,
"rate_limit_count": 6,
"rate_limit_window": 60,
},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECTKEY_EDIT")
assert (
audit_log_event.render(entry)
== "edited project key KEY: rate limit count from None to 6, rate limit window from None to 60"
)
def test_audit_entry_project_key_rate_limit_window_edit(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECTKEY_EDIT"),
data={
"public_key": "KEY",
"prev_rate_limit_window": None,
"rate_limit_count": 6,
"rate_limit_window": 60,
},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECTKEY_EDIT")
assert (
audit_log_event.render(entry)
== "edited project key KEY: rate limit window from None to 60"
)
def test_audit_entry_project_key_rate_limit_count_edit(self) -> None:
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=audit_log.get_event_id("PROJECTKEY_EDIT"),
data={
"public_key": "KEY",
"prev_rate_limit_count": None,
"rate_limit_count": 6,
"rate_limit_window": 60,
},
)
audit_log_event = audit_log.get(entry.event)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("PROJECTKEY_EDIT")
assert (
audit_log_event.render(entry)
== "edited project key KEY: rate limit count from None to 6"
)
def test_audit_entry_integration_log(self) -> None:
project = self.create_project()
self.login_as(user=self.user)
entry = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=audit_log.get_event_id("INTEGRATION_ADD"),
data={"integration": "webhooks", "project": project.slug},
)
audit_log_event = audit_log.get(entry.event)
assert ("enabled") in audit_log_event.render(entry)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == audit_log.get_event_id("INTEGRATION_ADD")
entry2 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=audit_log.get_event_id("INTEGRATION_EDIT"),
data={"integration": "webhooks", "project": project.slug},
)
audit_log_event2 = audit_log.get(entry2.event)
assert ("edited") in audit_log_event2.render(entry2)
assert entry2.actor == self.user
assert entry2.target_object == self.project.id
assert entry2.event == audit_log.get_event_id("INTEGRATION_EDIT")
entry3 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.integration.id,
event=audit_log.get_event_id("INTEGRATION_EDIT"),
data={"provider": "github", "name": "config"},
)
audit_log_event3 = audit_log.get(entry3.event)
assert ("edited the config for the github integration") in audit_log_event3.render(entry3)
assert entry3.actor == self.user
assert entry3.target_object == self.integration.id
assert entry3.event == audit_log.get_event_id("INTEGRATION_EDIT")
entry4 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=audit_log.get_event_id("INTEGRATION_REMOVE"),
data={"integration": "webhooks", "project": project.slug},
)
audit_log_event4 = audit_log.get(entry4.event)
assert ("disable") in audit_log_event4.render(entry4)
assert entry4.actor == self.user
assert entry4.target_object == self.project.id
assert entry4.event == audit_log.get_event_id("INTEGRATION_REMOVE")
def test_create_system_audit_entry(self) -> None:
entry = create_system_audit_entry(
organization=self.org,
target_object=self.org.id,
event=audit_log.get_event_id("SSO_DISABLE"),
data={"provider": "GitHub"},
)
assert entry.event == audit_log.get_event_id("SSO_DISABLE")
assert entry.actor_label == "Sentry"
assert entry.organization_id == self.org.id
assert entry.target_object == self.org.id
assert audit_log.get(entry.event).render(entry) == "disabled sso (GitHub)"
| CreateAuditEntryTest |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/extra_trees.py | {
"start": 582,
"end": 6625
} | class ____(
IterativeComponent,
AutoSklearnRegressionAlgorithm,
):
def __init__(
self,
criterion,
min_samples_leaf,
min_samples_split,
max_features,
bootstrap,
max_leaf_nodes,
max_depth,
min_weight_fraction_leaf,
min_impurity_decrease,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
):
self.n_estimators = self.get_max_iter()
self.criterion = criterion
self.max_leaf_nodes = max_leaf_nodes
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.bootstrap = bootstrap
self.max_depth = max_depth
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.estimator = None
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_estimators
def iterative_fit(self, X, y, n_iter=1, refit=False):
from sklearn.ensemble import ExtraTreesRegressor as ETR
if refit:
self.estimator = None
if self.estimator is None:
self.n_estimators = int(self.n_estimators)
if self.criterion not in ("mse", "friedman_mse", "mae"):
raise ValueError(
"'criterion' is not in ('mse', 'friedman_mse', "
"'mae): %s" % self.criterion
)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_samples_leaf = int(self.min_samples_leaf)
self.min_samples_split = int(self.min_samples_split)
self.max_features = float(self.max_features)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
self.oob_score = check_for_bool(self.oob_score)
self.bootstrap = check_for_bool(self.bootstrap)
self.n_jobs = int(self.n_jobs)
self.verbose = int(self.verbose)
self.estimator = ETR(
n_estimators=n_iter,
criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
bootstrap=self.bootstrap,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
warm_start=True,
)
else:
self.estimator.n_estimators += n_iter
self.estimator.n_estimators = min(
self.estimator.n_estimators, self.n_estimators
)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
return not len(self.estimator.estimators_) < self.n_estimators
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "ET",
"name": "Extra Trees Regressor",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": True,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
criterion = CategoricalHyperparameter(
"criterion", ["mse", "friedman_mse", "mae"]
)
max_features = UniformFloatHyperparameter(
"max_features", 0.1, 1.0, default_value=1
)
max_depth = UnParametrizedHyperparameter(name="max_depth", value="None")
min_weight_fraction_leaf = UnParametrizedHyperparameter(
"min_weight_fraction_leaf", 0.0
)
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1
)
min_impurity_decrease = UnParametrizedHyperparameter(
"min_impurity_decrease", 0.0
)
bootstrap = CategoricalHyperparameter(
"bootstrap", ["True", "False"], default_value="False"
)
cs.add_hyperparameters(
[
criterion,
max_features,
max_depth,
max_leaf_nodes,
min_samples_split,
min_samples_leaf,
min_impurity_decrease,
min_weight_fraction_leaf,
bootstrap,
]
)
return cs
| ExtraTreesRegressor |
python | huggingface__transformers | src/transformers/models/timm_wrapper/image_processing_timm_wrapper.py | {
"start": 1194,
"end": 5348
} | class ____(BaseImageProcessor):
"""
Wrapper class for timm models to be used within transformers.
Args:
pretrained_cfg (`dict[str, Any]`):
The configuration of the pretrained model used to resolve evaluation and
training transforms.
architecture (`Optional[str]`, *optional*):
Name of the architecture of the model.
"""
main_input_name = "pixel_values"
def __init__(
self,
pretrained_cfg: dict[str, Any],
architecture: Optional[str] = None,
**kwargs,
):
requires_backends(self, "timm")
super().__init__(architecture=architecture)
self.data_config = timm.data.resolve_data_config(pretrained_cfg, model=None, verbose=False)
self.val_transforms = timm.data.create_transform(**self.data_config, is_training=False)
# useful for training, see examples/pytorch/image-classification/run_image_classification.py
self.train_transforms = timm.data.create_transform(**self.data_config, is_training=True)
# If `ToTensor` is in the transforms, then the input should be numpy array or PIL image.
# Otherwise, the input can be a tensor. In later timm versions, `MaybeToTensor` is used
# which can handle both numpy arrays / PIL images and tensors.
self._not_supports_tensor_input = any(
transform.__class__.__name__ == "ToTensor" for transform in self.val_transforms.transforms
)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
"""
output = super().to_dict()
output.pop("train_transforms", None)
output.pop("val_transforms", None)
output.pop("_not_supports_tensor_input", None)
return output
@classmethod
def get_image_processor_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> tuple[dict[str, Any], dict[str, Any]]:
"""
Get the image processor dict for the model.
"""
image_processor_filename = kwargs.pop("image_processor_filename", "config.json")
return super().get_image_processor_dict(
pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs
)
def preprocess(
self,
images: ImageInput,
return_tensors: Optional[Union[str, TensorType]] = "pt",
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
"""
if return_tensors != "pt":
raise ValueError(f"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}")
if self._not_supports_tensor_input and isinstance(images, torch.Tensor):
images = images.cpu().numpy()
# If the input is a torch tensor, then no conversion is needed
# Otherwise, we need to pass in a list of PIL images
if isinstance(images, torch.Tensor):
images = self.val_transforms(images)
# Add batch dimension if a single image
images = images.unsqueeze(0) if images.ndim == 3 else images
else:
images = make_flat_list_of_images(images)
images = [to_pil_image(image) for image in images]
images = torch.stack([self.val_transforms(image) for image in images])
return BatchFeature({"pixel_values": images}, tensor_type=return_tensors)
def save_pretrained(self, *args, **kwargs):
# disable it to make checkpoint the same as in `timm` library.
logger.warning_once(
"The `save_pretrained` method is disabled for TimmWrapperImageProcessor. "
"The image processor configuration is saved directly in `config.json` when "
"`save_pretrained` is called for saving the model."
)
__all__ = ["TimmWrapperImageProcessor"]
| TimmWrapperImageProcessor |
python | ray-project__ray | python/ray/data/preprocessors/discretizer.py | {
"start": 7217,
"end": 16408
} | class ____(_AbstractKBinsDiscretizer):
"""Bin values into discrete intervals (bins) of uniform width.
Columns must contain numerical values.
Examples:
Use :class:`UniformKBinsDiscretizer` to bin continuous features.
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import UniformKBinsDiscretizer
>>> df = pd.DataFrame({
... "value_1": [0.2, 1.4, 2.5, 6.2, 9.7, 2.1],
... "value_2": [10, 15, 13, 12, 23, 25],
... })
>>> ds = ray.data.from_pandas(df)
>>> discretizer = UniformKBinsDiscretizer(
... columns=["value_1", "value_2"], bins=4
... )
>>> discretizer.fit_transform(ds).to_pandas()
value_1 value_2
0 0 0
1 0 1
2 0 0
3 2 0
4 3 3
5 0 3
:class:`UniformKBinsDiscretizer` can also be used in append mode by providing the
name of the output_columns that should hold the encoded values.
>>> discretizer = UniformKBinsDiscretizer(
... columns=["value_1", "value_2"],
... bins=4,
... output_columns=["value_1_discretized", "value_2_discretized"]
... )
>>> discretizer.fit_transform(ds).to_pandas() # doctest: +SKIP
value_1 value_2 value_1_discretized value_2_discretized
0 0.2 10 0 0
1 1.4 15 0 1
2 2.5 13 0 0
3 6.2 12 2 0
4 9.7 23 3 3
5 2.1 25 0 3
You can also specify different number of bins per column.
>>> discretizer = UniformKBinsDiscretizer(
... columns=["value_1", "value_2"], bins={"value_1": 4, "value_2": 3}
... )
>>> discretizer.fit_transform(ds).to_pandas()
value_1 value_2
0 0 0
1 0 0
2 0 0
3 2 0
4 3 2
5 0 2
Args:
columns: The columns to discretize.
bins: Defines the number of equal-width bins.
Can be either an integer (which will be applied to all columns),
or a dict that maps columns to integers.
The range is extended by .1% on each side to include
the minimum and maximum values.
right: Indicates whether bins includes the rightmost edge or not.
include_lowest: Whether the first interval should be left-inclusive
or not.
duplicates: Can be either 'raise' or 'drop'. If bin edges are not unique,
raise ``ValueError`` or drop non-uniques.
dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype``
objects or ``np.integer`` types. If you don't include a column in ``dtypes``
or specify it as an integer dtype, the outputted column will consist of
ordered integers corresponding to bins. If you use a
``pd.CategoricalDtype``, the outputted column will be a
``pd.CategoricalDtype`` with the categories being mapped to bins.
You can use ``pd.CategoricalDtype(categories, ordered=True)`` to
preserve information about bin order.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
.. seealso::
:class:`CustomKBinsDiscretizer`
If you want to specify your own bin edges.
"""
def __init__(
self,
columns: List[str],
bins: Union[int, Dict[str, int]],
*,
right: bool = True,
include_lowest: bool = False,
duplicates: str = "raise",
dtypes: Optional[
Dict[str, Union[pd.CategoricalDtype, Type[np.integer]]]
] = None,
output_columns: Optional[List[str]] = None,
):
super().__init__()
self.columns = columns
self.bins = bins
self.right = right
self.include_lowest = include_lowest
self.duplicates = duplicates
self.dtypes = dtypes
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
def _fit(self, dataset: "Dataset") -> Preprocessor:
self._validate_on_fit()
if isinstance(self.bins, dict):
columns = self.bins.keys()
else:
columns = self.columns
for column in columns:
bins = self.bins[column] if isinstance(self.bins, dict) else self.bins
if not isinstance(bins, int):
raise TypeError(
f"`bins` must be an integer or a dict of integers, got {bins}"
)
self.stat_computation_plan.add_aggregator(
aggregator_fn=Min,
columns=columns,
)
self.stat_computation_plan.add_aggregator(
aggregator_fn=Max,
columns=columns,
)
return self
def _validate_on_fit(self):
self._validate_bins_columns()
def _fit_execute(self, dataset: "Dataset"):
stats = self.stat_computation_plan.compute(dataset)
self.stats_ = post_fit_processor(stats, self.bins, self.right)
return self
def post_fit_processor(aggregate_stats: dict, bins: Union[str, Dict], right: bool):
mins, maxes, stats = {}, {}, {}
for key, value in aggregate_stats.items():
column_name = key[4:-1] # min(column) -> column
if key.startswith("min"):
mins[column_name] = value
if key.startswith("max"):
maxes[column_name] = value
for column in mins.keys():
stats[column] = _translate_min_max_number_of_bins_to_bin_edges(
mn=mins[column],
mx=maxes[column],
bins=bins[column] if isinstance(bins, dict) else bins,
right=right,
)
return stats
# Copied from
# https://github.com/pandas-dev/pandas/blob/v1.4.4/pandas/core/reshape/tile.py#L257
# under
# BSD 3-Clause License
#
# Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc.
# and PyData Development Team
# All rights reserved.
#
# Copyright (c) 2011-2022, Open source contributors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _translate_min_max_number_of_bins_to_bin_edges(
mn: float, mx: float, bins: int, right: bool
) -> List[float]:
"""Translates a range and desired number of bins into list of bin edges."""
rng = (mn, mx)
mn, mx = (mi + 0.0 for mi in rng)
if np.isinf(mn) or np.isinf(mx):
raise ValueError(
"Cannot specify integer `bins` when input data contains infinity."
)
elif mn == mx: # adjust end points before binning
mn -= 0.001 * abs(mn) if mn != 0 else 0.001
mx += 0.001 * abs(mx) if mx != 0 else 0.001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
return bins
# TODO(ml-team)
# Add QuantileKBinsDiscretizer
| UniformKBinsDiscretizer |
python | pytorch__pytorch | test/profiler/test_memory_profiler.py | {
"start": 1780,
"end": 2350
} | class ____(torch.nn.Module):
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.in_features = in_features
self.out_features = out_features
def forward(self, x) -> torch.Tensor:
if getattr(self, "weight", None) is None:
self.weight = torch.nn.Parameter(
torch.empty((self.out_features, self.in_features))
)
self.bias = torch.nn.Parameter(torch.empty(self.out_features))
return torch.nn.functional.linear(x, self.weight, self.bias)
| LazyLinear |
python | django-import-export__django-import-export | tests/core/tests/test_forms.py | {
"start": 2989,
"end": 9664
} | class ____(AdminTestMixin, TestCase):
@classmethod
def setUpTestData(cls) -> None:
cls.resources = (BookResource, BookResourceWithStoreInstance)
cls.form = forms.SelectableFieldsExportForm(
formats=(CSV,),
resources=cls.resources,
)
def test_create_boolean_fields(self) -> None:
form_fields = self.form.fields
for resource in self.resources:
fields = resource().get_export_order()
for field in fields:
field_name = forms.SelectableFieldsExportForm.create_boolean_field_name(
resource, field
)
self.assertIn(field_name, form_fields)
form_field = form_fields[field_name]
self.assertIsInstance(form_field, django.forms.BooleanField)
def test_form_raises_validation_error_when_no_resource_fields_are_selected(
self,
) -> None:
data = {"resource": "0", "format": "0", "bookresource_id": False}
form = forms.SelectableFieldsExportForm(
formats=(CSV,), resources=self.resources, data=data
)
self.assertFalse(form.is_valid())
self.assertTrue("Select at least 1 field for" in form.errors.as_text())
def test_remove_unselected_resource_fields_on_validation(self):
data = {"resource": "0", "format": "0"}
self._prepend_form_prefix(data)
# Add all field values to form data for validation
for resource in self.resources:
for field in resource().get_export_order():
data[
forms.SelectableFieldsExportForm.create_boolean_field_name(
resource, field
)
] = True
form = forms.SelectableFieldsExportForm(
formats=(CSV,), resources=self.resources, data=data
)
self.assertTrue(form.is_valid())
selected_resource = self.resources[0]
selected_resource_fields = selected_resource().get_export_order()
not_selected_resource = self.resources[1] # resource on index 0 was selected
for field in not_selected_resource().get_export_order():
# Only assert fields which doesn't exist in selected resource's fields
if field not in selected_resource_fields:
self.assertNotIn(field, form.cleaned_data)
def test_normalize_resource_field_names(self) -> None:
"""
Field names are combination of resource's name and field name.
After validation, fields that belong to unselected resources are removed
and resource name is removed from field names
"""
data = {"resource": "0", "format": "0"}
self._prepend_form_prefix(data)
# Add all field values to form data for validation
for resource in self.resources:
for field in resource().get_export_order():
data[
forms.SelectableFieldsExportForm.create_boolean_field_name(
resource, field
)
] = "on"
form = forms.SelectableFieldsExportForm(
formats=(CSV,), resources=self.resources, data=data
)
self.assertTrue(form.is_valid())
selected_resource = self.resources[0]
for field in selected_resource().get_export_order():
self.assertIn(field, form.cleaned_data)
def test_get_selected_resource_fields_without_validation_raises_validation_error(
self,
) -> None:
self.assertRaises(
django.forms.ValidationError, self.form.get_selected_resource_export_fields
)
def test_get_field_label(self):
"""test SelectableFieldsExportForm._get_field_label"""
form = forms.SelectableFieldsExportForm(
formats=(CSV,), resources=(BookResource,)
)
resource = BookResource()
self.assertEqual(
form._get_field_label(resource, "bookresource_id"),
"Bookresource Id",
)
self.assertEqual(
form._get_field_label(resource, "published"), "Published (published_date)"
)
def test_get_selected_rerource_fields(self) -> None:
data = {"resource": "0", "format": "0"}
self._prepend_form_prefix(data)
form = forms.SelectableFieldsExportForm(
formats=(CSV,), resources=self.resources, data=data
)
for resource in self.resources:
for field in resource().get_export_order():
data[
forms.SelectableFieldsExportForm.create_boolean_field_name(
resource, field
)
] = "on"
self.assertTrue(form.is_valid())
selected_resource = self.resources[0]()
self.assertEqual(
form.get_selected_resource_export_fields(),
list(selected_resource.get_export_order()),
)
def test_fields_order(self) -> None:
form = forms.SelectableFieldsExportForm(
formats=(CSV,), resources=(BookResource,)
)
self.assertEqual(
list(form.fields.keys()),
[
"resource",
"bookresource_id",
"bookresource_name",
"bookresource_author",
"bookresource_author_email",
"bookresource_published",
"bookresource_published_time",
"bookresource_price",
"bookresource_added",
"bookresource_categories",
"format",
"export_items",
],
)
def test_resource_boolean_field_attributes(self) -> None:
for resource_index, resource in enumerate(self.resources):
resource_fields = resource().get_export_order()
initial_field_checked = False
for resource_field in resource_fields:
field_name = forms.SelectableFieldsExportForm.create_boolean_field_name(
resource, resource_field
)
form_field = self.form.fields[field_name]
if not initial_field_checked:
self.assertTrue(form_field.initial_field)
initial_field_checked = True
self.assertTrue(form_field.is_selectable_field)
self.assertEqual(form_field.resource_name, resource.__name__)
self.assertEqual(form_field.resource_index, resource_index)
self.assertEqual(form_field.widget.attrs["resource-id"], resource_index)
| SelectableFieldsExportFormTest |
python | astropy__astropy | astropy/visualization/stretch.py | {
"start": 14520,
"end": 18017
} | class ____(BaseStretch):
r"""
A log stretch.
The stretch is given by:
.. math::
y = \frac{\log{(a x + 1)}}{\log{(a + 1)}}
Parameters
----------
a : float
The ``a`` parameter used in the above formula. The stretch
becomes more linear for small ``a`` values. ``a`` must be
greater than 0. Default is 1000.
Examples
--------
.. plot::
:show-source-link:
import numpy as np
from astropy.visualization import LogStretch
from matplotlib import pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
x = np.linspace(0, 1, 100)
a_vals = (0.1, 1, 3, 10, 30, 100, 1000, 10000)
for a in a_vals:
if a == 1000:
lw = 3
else:
lw = 1
stretch = LogStretch(a)
label = f'{a=}'
ax.plot(x, stretch(x, clip=True), label=label, lw=lw)
ax.axis('equal')
ax.plot(x, x, ls='dotted', color='k', alpha=0.3)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_xlabel('Input Value')
ax.set_ylabel('Output Value')
ax.set_title(stretch.__class__.__name__)
ax.legend(loc='lower right', fontsize=8)
"""
@property
def _supports_invalid_kw(self):
return True
def __init__(self, a=1000.0):
super().__init__()
if a <= 0: # singularity
raise ValueError("a must be > 0")
self.a = a
def __call__(self, values, clip=True, out=None, invalid=None):
"""
Transform values using this stretch.
Parameters
----------
values : array-like
The input values, which should already be normalized to the
[0:1] range.
clip : bool, optional
If `True` (default), values outside the [0:1] range are
clipped to the [0:1] range.
out : ndarray, optional
If specified, the output values will be placed in this array
(typically used for in-place calculations).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``values`` array are not changed. This option is
generally used with matplotlib normalization classes, where
the ``invalid`` value should map to the matplotlib colormap
"under" value (i.e., any finite value < 0). If `None`, then
NaN values are not replaced. This keyword has no effect if
``clip=True``.
Returns
-------
result : ndarray
The transformed values.
"""
values = _prepare(values, clip=clip, out=out)
replace_invalid = not clip and invalid is not None
with np.errstate(invalid="ignore"):
if replace_invalid:
idx = values < 0
np.multiply(values, self.a, out=values)
np.add(values, 1.0, out=values)
np.log(values, out=values)
np.true_divide(values, np.log(self.a + 1.0), out=values)
if replace_invalid:
# Assign new NaN (i.e., NaN not in the original input
# values, but generated by this class) to the invalid value.
values[idx] = invalid
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedLogStretch(self.a)
| LogStretch |
python | kubernetes-client__python | kubernetes/client/models/v1_endpoint_subset.py | {
"start": 383,
"end": 6047
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'addresses': 'list[V1EndpointAddress]',
'not_ready_addresses': 'list[V1EndpointAddress]',
'ports': 'list[CoreV1EndpointPort]'
}
attribute_map = {
'addresses': 'addresses',
'not_ready_addresses': 'notReadyAddresses',
'ports': 'ports'
}
def __init__(self, addresses=None, not_ready_addresses=None, ports=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointSubset - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._addresses = None
self._not_ready_addresses = None
self._ports = None
self.discriminator = None
if addresses is not None:
self.addresses = addresses
if not_ready_addresses is not None:
self.not_ready_addresses = not_ready_addresses
if ports is not None:
self.ports = ports
@property
def addresses(self):
"""Gets the addresses of this V1EndpointSubset. # noqa: E501
IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize. # noqa: E501
:return: The addresses of this V1EndpointSubset. # noqa: E501
:rtype: list[V1EndpointAddress]
"""
return self._addresses
@addresses.setter
def addresses(self, addresses):
"""Sets the addresses of this V1EndpointSubset.
IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize. # noqa: E501
:param addresses: The addresses of this V1EndpointSubset. # noqa: E501
:type: list[V1EndpointAddress]
"""
self._addresses = addresses
@property
def not_ready_addresses(self):
"""Gets the not_ready_addresses of this V1EndpointSubset. # noqa: E501
IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check. # noqa: E501
:return: The not_ready_addresses of this V1EndpointSubset. # noqa: E501
:rtype: list[V1EndpointAddress]
"""
return self._not_ready_addresses
@not_ready_addresses.setter
def not_ready_addresses(self, not_ready_addresses):
"""Sets the not_ready_addresses of this V1EndpointSubset.
IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check. # noqa: E501
:param not_ready_addresses: The not_ready_addresses of this V1EndpointSubset. # noqa: E501
:type: list[V1EndpointAddress]
"""
self._not_ready_addresses = not_ready_addresses
@property
def ports(self):
"""Gets the ports of this V1EndpointSubset. # noqa: E501
Port numbers available on the related IP addresses. # noqa: E501
:return: The ports of this V1EndpointSubset. # noqa: E501
:rtype: list[CoreV1EndpointPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1EndpointSubset.
Port numbers available on the related IP addresses. # noqa: E501
:param ports: The ports of this V1EndpointSubset. # noqa: E501
:type: list[CoreV1EndpointPort]
"""
self._ports = ports
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointSubset):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointSubset):
return True
return self.to_dict() != other.to_dict()
| V1EndpointSubset |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/state_changes.py | {
"start": 784,
"end": 888
} | class ____(_StateChangeState):
ANY = 1
NO_CHANGE = 2
CHANGE_IN_PROGRESS = 3
| _StateChangeStates |
python | huggingface__transformers | src/transformers/models/qwen3_moe/modular_qwen3_moe.py | {
"start": 2759,
"end": 2820
} | class ____(Qwen2MoeDecoderLayer):
pass
| Qwen3MoeDecoderLayer |
python | Textualize__textual | src/textual/messages.py | {
"start": 1542,
"end": 1766
} | class ____(Message, verbose=True):
"""Sent by Textual when a scroll update is required."""
def can_replace(self, message: Message) -> bool:
return isinstance(message, UpdateScroll)
@rich.repr.auto
| UpdateScroll |
python | ray-project__ray | rllib/env/multi_agent_env.py | {
"start": 17949,
"end": 26795
} | class ____(BaseEnv):
"""Internal adapter of MultiAgentEnv to BaseEnv.
This also supports vectorization if num_envs > 1.
"""
def __init__(
self,
make_env: Callable[[int], EnvType],
existing_envs: List["MultiAgentEnv"],
num_envs: int,
restart_failed_sub_environments: bool = False,
):
"""Wraps MultiAgentEnv(s) into the BaseEnv API.
Args:
make_env: Factory that produces a new MultiAgentEnv instance taking the
vector index as only call argument.
Must be defined, if the number of existing envs is less than num_envs.
existing_envs: List of already existing multi-agent envs.
num_envs: Desired num multiagent envs to have at the end in
total. This will include the given (already created)
`existing_envs`.
restart_failed_sub_environments: If True and any sub-environment (within
this vectorized env) throws any error during env stepping, we will try
to restart the faulty sub-environment. This is done
without disturbing the other (still intact) sub-environments.
"""
self.make_env = make_env
self.envs = existing_envs
self.num_envs = num_envs
self.restart_failed_sub_environments = restart_failed_sub_environments
self.terminateds = set()
self.truncateds = set()
while len(self.envs) < self.num_envs:
self.envs.append(self.make_env(len(self.envs)))
for env in self.envs:
assert isinstance(env, MultiAgentEnv)
self._init_env_state(idx=None)
self._unwrapped_env = self.envs[0].unwrapped
@override(BaseEnv)
def poll(
self,
) -> Tuple[
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
MultiEnvDict,
]:
obs, rewards, terminateds, truncateds, infos = {}, {}, {}, {}, {}
for i, env_state in enumerate(self.env_states):
(
obs[i],
rewards[i],
terminateds[i],
truncateds[i],
infos[i],
) = env_state.poll()
return obs, rewards, terminateds, truncateds, infos, {}
@override(BaseEnv)
def send_actions(self, action_dict: MultiEnvDict) -> None:
for env_id, agent_dict in action_dict.items():
if env_id in self.terminateds or env_id in self.truncateds:
raise ValueError(
f"Env {env_id} is already done and cannot accept new actions"
)
env = self.envs[env_id]
try:
obs, rewards, terminateds, truncateds, infos = env.step(agent_dict)
except Exception as e:
if self.restart_failed_sub_environments:
logger.exception(e.args[0])
self.try_restart(env_id=env_id)
obs = e
rewards = {}
terminateds = {"__all__": True}
truncateds = {"__all__": False}
infos = {}
else:
raise e
assert isinstance(
obs, (dict, Exception)
), "Not a multi-agent obs dict or an Exception!"
assert isinstance(rewards, dict), "Not a multi-agent reward dict!"
assert isinstance(terminateds, dict), "Not a multi-agent terminateds dict!"
assert isinstance(truncateds, dict), "Not a multi-agent truncateds dict!"
assert isinstance(infos, dict), "Not a multi-agent info dict!"
if isinstance(obs, dict):
info_diff = set(infos).difference(set(obs))
if info_diff and info_diff != {"__common__"}:
raise ValueError(
"Key set for infos must be a subset of obs (plus optionally "
"the '__common__' key for infos concerning all/no agents): "
"{} vs {}".format(infos.keys(), obs.keys())
)
if "__all__" not in terminateds:
raise ValueError(
"In multi-agent environments, '__all__': True|False must "
"be included in the 'terminateds' dict: got {}.".format(terminateds)
)
elif "__all__" not in truncateds:
raise ValueError(
"In multi-agent environments, '__all__': True|False must "
"be included in the 'truncateds' dict: got {}.".format(truncateds)
)
if terminateds["__all__"]:
self.terminateds.add(env_id)
if truncateds["__all__"]:
self.truncateds.add(env_id)
self.env_states[env_id].observe(
obs, rewards, terminateds, truncateds, infos
)
@override(BaseEnv)
def try_reset(
self,
env_id: Optional[EnvID] = None,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
) -> Optional[Tuple[MultiEnvDict, MultiEnvDict]]:
ret_obs = {}
ret_infos = {}
if isinstance(env_id, int):
env_id = [env_id]
if env_id is None:
env_id = list(range(len(self.envs)))
for idx in env_id:
obs, infos = self.env_states[idx].reset(seed=seed, options=options)
if isinstance(obs, Exception):
if self.restart_failed_sub_environments:
self.env_states[idx].env = self.envs[idx] = self.make_env(idx)
else:
raise obs
else:
assert isinstance(obs, dict), "Not a multi-agent obs dict!"
if obs is not None:
if idx in self.terminateds:
self.terminateds.remove(idx)
if idx in self.truncateds:
self.truncateds.remove(idx)
ret_obs[idx] = obs
ret_infos[idx] = infos
return ret_obs, ret_infos
@override(BaseEnv)
def try_restart(self, env_id: Optional[EnvID] = None) -> None:
if isinstance(env_id, int):
env_id = [env_id]
if env_id is None:
env_id = list(range(len(self.envs)))
for idx in env_id:
# Try closing down the old (possibly faulty) sub-env, but ignore errors.
try:
self.envs[idx].close()
except Exception as e:
if log_once("close_sub_env"):
logger.warning(
"Trying to close old and replaced sub-environment (at vector "
f"index={idx}), but closing resulted in error:\n{e}"
)
# Try recreating the sub-env.
logger.warning(f"Trying to restart sub-environment at index {idx}.")
self.env_states[idx].env = self.envs[idx] = self.make_env(idx)
logger.warning(f"Sub-environment at index {idx} restarted successfully.")
@override(BaseEnv)
def get_sub_environments(
self, as_dict: bool = False
) -> Union[Dict[str, EnvType], List[EnvType]]:
if as_dict:
return {_id: env_state.env for _id, env_state in enumerate(self.env_states)}
return [state.env for state in self.env_states]
@override(BaseEnv)
def try_render(self, env_id: Optional[EnvID] = None) -> None:
if env_id is None:
env_id = 0
assert isinstance(env_id, int)
return self.envs[env_id].render()
@property
@override(BaseEnv)
def observation_space(self) -> gym.spaces.Dict:
return self.envs[0].observation_space
@property
@override(BaseEnv)
def action_space(self) -> gym.Space:
return self.envs[0].action_space
@override(BaseEnv)
def get_agent_ids(self) -> Set[AgentID]:
return self.envs[0].get_agent_ids()
def _init_env_state(self, idx: Optional[int] = None) -> None:
"""Resets all or one particular sub-environment's state (by index).
Args:
idx: The index to reset at. If None, reset all the sub-environments' states.
"""
# If index is None, reset all sub-envs' states:
if idx is None:
self.env_states = [
_MultiAgentEnvState(env, self.restart_failed_sub_environments)
for env in self.envs
]
# Index provided, reset only the sub-env's state at the given index.
else:
assert isinstance(idx, int)
self.env_states[idx] = _MultiAgentEnvState(
self.envs[idx], self.restart_failed_sub_environments
)
@OldAPIStack
| MultiAgentEnvWrapper |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr.py | {
"start": 85583,
"end": 94455
} | class ____(RTDetrPreTrainedModel):
# When using clones, all layers > 0 will be clones, but layer 0 *is* required
# We can't initialize the model on meta device as some weights are modified during the initialization
_no_split_modules = None
def __init__(self, config: RTDetrConfig):
super().__init__(config)
self.model = RTDetrModel(config)
num_pred = config.decoder_layers
self.model.decoder.class_embed = nn.ModuleList(
[torch.nn.Linear(config.d_model, config.num_labels) for _ in range(num_pred)]
)
self.model.decoder.bbox_embed = nn.ModuleList(
[RTDetrMLPPredictionHead(config, config.d_model, config.d_model, 4, num_layers=3) for _ in range(num_pred)]
)
# if two-stage, the last class_embed and bbox_embed is for region proposal generation
self.post_init()
def _set_aux_loss(self, outputs_class, outputs_coord):
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class, outputs_coord)]
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[list[dict]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.FloatTensor], RTDetrObjectDetectionOutput]:
r"""
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
labels (`list[Dict]` of len `(batch_size,)`, *optional*):
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
Examples:
```python
>>> from transformers import RTDetrImageProcessor, RTDetrForObjectDetection
>>> from PIL import Image
>>> import requests
>>> import torch
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = RTDetrImageProcessor.from_pretrained("PekingU/rtdetr_r50vd")
>>> model = RTDetrForObjectDetection.from_pretrained("PekingU/rtdetr_r50vd")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 300, 80]
>>> boxes = outputs.pred_boxes
>>> list(boxes.shape)
[1, 300, 4]
>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> target_sizes = torch.tensor([image.size[::-1]])
>>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[
... 0
... ]
>>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
... box = [round(i, 2) for i in box.tolist()]
... print(
... f"Detected {model.config.id2label[label.item()]} with confidence "
... f"{round(score.item(), 3)} at location {box}"
... )
Detected sofa with confidence 0.97 at location [0.14, 0.38, 640.13, 476.21]
Detected cat with confidence 0.96 at location [343.38, 24.28, 640.14, 371.5]
Detected cat with confidence 0.958 at location [13.23, 54.18, 318.98, 472.22]
Detected remote with confidence 0.951 at location [40.11, 73.44, 175.96, 118.48]
Detected remote with confidence 0.924 at location [333.73, 76.58, 369.97, 186.99]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
pixel_values,
pixel_mask=pixel_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
denoising_meta_values = (
outputs.denoising_meta_values if return_dict else outputs[-1] if self.training else None
)
outputs_class = outputs.intermediate_logits if return_dict else outputs[2]
outputs_coord = outputs.intermediate_reference_points if return_dict else outputs[3]
predicted_corners = outputs.intermediate_predicted_corners if return_dict else outputs[4]
initial_reference_points = outputs.initial_reference_points if return_dict else outputs[5]
logits = outputs_class[:, -1]
pred_boxes = outputs_coord[:, -1]
loss, loss_dict, auxiliary_outputs, enc_topk_logits, enc_topk_bboxes = None, None, None, None, None
if labels is not None:
enc_topk_logits = outputs.enc_topk_logits if return_dict else outputs[-5]
enc_topk_bboxes = outputs.enc_topk_bboxes if return_dict else outputs[-4]
loss, loss_dict, auxiliary_outputs = self.loss_function(
logits,
labels,
self.device,
pred_boxes,
self.config,
outputs_class,
outputs_coord,
enc_topk_logits=enc_topk_logits,
enc_topk_bboxes=enc_topk_bboxes,
denoising_meta_values=denoising_meta_values,
predicted_corners=predicted_corners,
initial_reference_points=initial_reference_points,
**kwargs,
)
if not return_dict:
if auxiliary_outputs is not None:
output = (logits, pred_boxes) + (auxiliary_outputs,) + outputs
else:
output = (logits, pred_boxes) + outputs
return ((loss, loss_dict) + output) if loss is not None else output
return RTDetrObjectDetectionOutput(
loss=loss,
loss_dict=loss_dict,
logits=logits,
pred_boxes=pred_boxes,
auxiliary_outputs=auxiliary_outputs,
last_hidden_state=outputs.last_hidden_state,
intermediate_hidden_states=outputs.intermediate_hidden_states,
intermediate_logits=outputs.intermediate_logits,
intermediate_reference_points=outputs.intermediate_reference_points,
intermediate_predicted_corners=outputs.intermediate_predicted_corners,
initial_reference_points=outputs.initial_reference_points,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
init_reference_points=outputs.init_reference_points,
enc_topk_logits=outputs.enc_topk_logits,
enc_topk_bboxes=outputs.enc_topk_bboxes,
enc_outputs_class=outputs.enc_outputs_class,
enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
denoising_meta_values=outputs.denoising_meta_values,
)
__all__ = [
"RTDetrForObjectDetection",
"RTDetrModel",
"RTDetrPreTrainedModel",
]
| RTDetrForObjectDetection |
python | django__django | django/contrib/auth/password_validation.py | {
"start": 3148,
"end": 5195
} | class ____:
"""
Validate that the password is of a minimum length.
"""
def __init__(self, min_length=8):
self.min_length = min_length
def validate(self, password, user=None):
if len(password) < self.min_length:
raise ValidationError(
self.get_error_message(),
code="password_too_short",
params={"min_length": self.min_length},
)
def get_error_message(self):
return (
ngettext(
"This password is too short. It must contain at least %d character.",
"This password is too short. It must contain at least %d characters.",
self.min_length,
)
% self.min_length
)
def get_help_text(self):
return ngettext(
"Your password must contain at least %(min_length)d character.",
"Your password must contain at least %(min_length)d characters.",
self.min_length,
) % {"min_length": self.min_length}
def exceeds_maximum_length_ratio(password, max_similarity, value):
"""
Test that value is within a reasonable range of password.
The following ratio calculations are based on testing SequenceMatcher like
this:
for i in range(0,6):
print(10**i, SequenceMatcher(a='A', b='A'*(10**i)).quick_ratio())
which yields:
1 1.0
10 0.18181818181818182
100 0.019801980198019802
1000 0.001998001998001998
10000 0.00019998000199980003
100000 1.999980000199998e-05
This means a length_ratio of 10 should never yield a similarity higher than
0.2, for 100 this is down to 0.02 and for 1000 it is 0.002. This can be
calculated via 2 / length_ratio. As a result we avoid the potentially
expensive sequence matching.
"""
pwd_len = len(password)
length_bound_similarity = max_similarity / 2 * pwd_len
value_len = len(value)
return pwd_len >= 10 * value_len and value_len < length_bound_similarity
| MinimumLengthValidator |
python | pytorch__pytorch | test/dynamo/test_sources.py | {
"start": 281,
"end": 2058
} | class ____(torch._dynamo.test_case.TestCase):
def test_is_local(self):
x_src = LocalSource("x")
y_src = GlobalSource("y")
attr_x_a = AttrSource(x_src, "a")
attr_y_b = AttrSource(y_src, "b")
self.assertTrue(is_from_local_source(attr_x_a))
self.assertEqual(is_from_local_source(attr_y_b), False)
def test_property_closure(self):
def external_property():
closed_value = 7
def internal_function(self):
return closed_value
return internal_function
class Elements:
myprop = property(external_property())
def func(elements):
if not elements.myprop:
return torch.tensor([1, 2, 3])
else:
return torch.tensor([4, 5, 6])
e = Elements()
a = func(e)
b = torch.compile(func, backend="eager", fullgraph=True)(e)
self.assertEqual(a, b)
def test_supported_nodes(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.randn(10, 10)
def forward(self):
if (
torch.utils._pytree.SUPPORTED_NODES[CausalLMOutputWithPast].type
is int
):
x = torch.sin(self.x)
else:
x = torch.cos(self.x)
return x
torch.utils._pytree.register_pytree_node(
CausalLMOutputWithPast,
lambda x: ((), None),
lambda x, _: CausalLMOutputWithPast(),
)
torch.export.export(Model(), (), strict=True)
if __name__ == "__main__":
torch._dynamo.test_case.run_tests()
| SourceTests |
python | huggingface__transformers | tests/models/oneformer/test_processing_oneformer.py | {
"start": 6984,
"end": 34355
} | class ____(unittest.TestCase):
processing_class = OneFormerProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_feat_extracttion_common.test_feat_extract_to_json_string
feature_extraction_class = processing_class
def setUp(self):
self.processing_tester = OneFormerProcessorTester(self)
@property
def processor_dict(self):
return self.processing_tester.prepare_processor_dict()
def test_feat_extract_properties(self):
processor = self.processing_class(**self.processor_dict)
self.assertTrue(hasattr(processor, "image_processor"))
self.assertTrue(hasattr(processor, "tokenizer"))
self.assertTrue(hasattr(processor, "max_seq_length"))
self.assertTrue(hasattr(processor, "task_seq_length"))
@unittest.skip
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize processor
processor = self.processing_class(**self.processor_dict)
# create random PIL images
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs
)
self.assertEqual(
encoded_images.shape,
(1, self.processing_tester.num_channels, expected_height, expected_width),
)
tokenized_task_inputs = processor(image_inputs[0], ["semantic"], return_tensors="pt").task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(1, expected_sequence_length),
)
# Test batched
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs, batched=True
)
encoded_images = processor(image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.processing_tester.batch_size,
self.processing_tester.num_channels,
expected_height,
expected_width,
),
)
tokenized_task_inputs = processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
).task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(self.processing_tester.batch_size, expected_sequence_length),
)
def test_call_numpy(self):
# Initialize processor
processor = self.processing_class(**self.processor_dict)
# create random numpy tensors
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs
)
self.assertEqual(
encoded_images.shape,
(1, self.processing_tester.num_channels, expected_height, expected_width),
)
tokenized_task_inputs = processor(image_inputs[0], ["semantic"], return_tensors="pt").task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(1, expected_sequence_length),
)
# Test batched
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs, batched=True
)
encoded_images = processor(image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.processing_tester.batch_size,
self.processing_tester.num_channels,
expected_height,
expected_width,
),
)
tokenized_task_inputs = processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
).task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(self.processing_tester.batch_size, expected_sequence_length),
)
def test_call_pytorch(self):
# Initialize processor
processor = self.processing_class(**self.processor_dict)
# create random PyTorch tensors
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs
)
self.assertEqual(
encoded_images.shape,
(1, self.processing_tester.num_channels, expected_height, expected_width),
)
tokenized_task_inputs = processor(image_inputs[0], ["semantic"], return_tensors="pt").task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(1, expected_sequence_length),
)
# Test batched
expected_height, expected_width, expected_sequence_length = self.processing_tester.get_expected_values(
image_inputs, batched=True
)
encoded_images = processor(image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.processing_tester.batch_size,
self.processing_tester.num_channels,
expected_height,
expected_width,
),
)
tokenized_task_inputs = processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
).task_inputs
self.assertEqual(
tokenized_task_inputs.shape,
(self.processing_tester.batch_size, expected_sequence_length),
)
def comm_get_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
processor = self.processing_class(**self.processor_dict)
# prepare image and target
num_labels = self.processing_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = self.processing_tester.prepare_image_inputs(equal_resolution=False)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
annotations = [
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
inputs = processor(
image_inputs,
["semantic"] * len(image_inputs),
annotations,
return_tensors="pt",
instance_id_to_semantic_id=instance_id_to_semantic_id,
pad_and_return_pixel_mask=True,
)
return inputs
@unittest.skip
def test_init_without_params(self):
pass
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
feat_extract_first.save_pretrained(tmpdirname)
check_json_file_has_correct_format(os.path.join(tmpdirname, "processor_config.json"))
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
self.assertEqual(feat_extract_second.image_processor.to_dict(), feat_extract_first.image_processor.to_dict())
self.assertIsInstance(feat_extract_first.image_processor, OneFormerImageProcessor)
self.assertIsInstance(feat_extract_first.tokenizer, CLIPTokenizer)
def test_call_with_segmentation_maps(self):
def common(is_instance_map=False, segmentation_type=None):
inputs = self.comm_get_processor_inputs(
with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
)
mask_labels = inputs["mask_labels"]
class_labels = inputs["class_labels"]
pixel_values = inputs["pixel_values"]
text_inputs = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
self.assertEqual(text_input.shape[0], self.processing_tester.num_text)
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
def test_integration_semantic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = processor.encode_inputs(
pixel_values_list,
["semantic", "semantic"],
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values, task inputs, text inputs and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
self.assertEqual(inputs["task_inputs"].shape, (2, 77))
self.assertEqual(inputs["text_inputs"].shape, (2, self.processing_tester.num_text, 77))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 12, 3, 5, 0, 43, 96, 104, 31, 125, 138, 87, 149]) # noqa: E231 # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 67, 82, 17, 12, 42, 3, 14, 5, 0, 115, 43, 8, 138, 125, 143]) # noqa: E231 # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
self.assertEqual(inputs["task_inputs"][0].sum().item(), 141082)
self.assertEqual(inputs["task_inputs"][0].sum().item(), inputs["task_inputs"][1].sum().item())
# verify the text inputs
self.assertEqual(len(inputs["text_inputs"]), 2)
self.assertEqual(inputs["text_inputs"][0].sum().item(), 1095752)
self.assertEqual(inputs["text_inputs"][1].sum().item(), 1062468)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (16, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (16, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_integration_instance_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = processor.encode_inputs(
pixel_values_list,
["instance", "instance"],
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values, task inputs, text inputs and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
self.assertEqual(inputs["task_inputs"].shape, (2, 77))
self.assertEqual(inputs["text_inputs"].shape, (2, self.processing_tester.num_text, 77))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 43, 43, 43, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 12, 12, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
self.assertEqual(inputs["task_inputs"][0].sum().item(), 144985)
self.assertEqual(inputs["task_inputs"][0].sum().item(), inputs["task_inputs"][1].sum().item())
# verify the text inputs
self.assertEqual(len(inputs["text_inputs"]), 2)
self.assertEqual(inputs["text_inputs"][0].sum().item(), 1037040)
self.assertEqual(inputs["text_inputs"][1].sum().item(), 1044078)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (73, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (57, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 35040.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 98228.0)
def test_integration_panoptic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = processor.encode_inputs(
pixel_values_list,
["panoptic", "panoptic"],
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values, task inputs, text inputs and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
self.assertEqual(inputs["task_inputs"].shape, (2, 77))
self.assertEqual(inputs["text_inputs"].shape, (2, self.processing_tester.num_text, 77))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
self.assertEqual(inputs["task_inputs"][0].sum().item(), 136240)
self.assertEqual(inputs["task_inputs"][0].sum().item(), inputs["task_inputs"][1].sum().item())
# verify the text inputs
self.assertEqual(len(inputs["text_inputs"]), 2)
self.assertEqual(inputs["text_inputs"][0].sum().item(), 1048653)
self.assertEqual(inputs["text_inputs"][1].sum().item(), 1067160)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
def test_post_process_semantic_segmentation(self):
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
outputs = self.processing_tester.get_fake_oneformer_outputs()
segmentation = processor.post_process_semantic_segmentation(outputs)
self.assertEqual(len(segmentation), self.processing_tester.batch_size)
self.assertEqual(
segmentation[0].shape,
(
self.processing_tester.height,
self.processing_tester.width,
),
)
target_sizes = [(1, 4) for i in range(self.processing_tester.batch_size)]
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
outputs = self.processing_tester.get_fake_oneformer_outputs()
segmentation = processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (self.processing_tester.height, self.processing_tester.width))
def test_post_process_panoptic_segmentation(self):
image_processor = OneFormerImageProcessor(
do_reduce_labels=True,
ignore_index=0,
size=(512, 512),
class_info_file="ade20k_panoptic.json",
num_text=self.processing_tester.num_text,
)
tokenizer = CLIPTokenizer.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
processor = OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
max_seq_length=77,
task_seq_length=77,
)
outputs = self.processing_tester.get_fake_oneformer_outputs()
segmentation = processor.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (self.processing_tester.height, self.processing_tester.width))
| OneFormerProcessingTest |
python | FactoryBoy__factory_boy | tests/test_alchemy.py | {
"start": 951,
"end": 1291
} | class ____(SQLAlchemyModelFactory):
class Meta:
model = models.MultiFieldModel
sqlalchemy_get_or_create = ('slug',)
sqlalchemy_session = models.session
sqlalchemy_session_persistence = 'commit'
id = factory.Sequence(lambda n: n)
foo = factory.Sequence(lambda n: 'foo%d' % n)
| MultifieldModelFactory |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 85631,
"end": 87580
} | class ____(CythonTransform):
"""
Declare all global cdef names that we allow referencing in other places,
before declaring everything (else) in source code order.
"""
def visit_CompilerDirectivesNode(self, node):
env = self.module_scope
old = env.directives
env.directives = node.directives
self.visitchildren(node)
env.directives = old
return node
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.module_scope.directives = node.directives
self.visitchildren(node)
return node
def visit_CDefExternNode(self, node):
old_cinclude_flag = self.module_scope.in_cinclude
self.module_scope.in_cinclude = 1
self.visitchildren(node)
self.module_scope.in_cinclude = old_cinclude_flag
return node
def visit_CEnumDefNode(self, node):
node.declare(self.module_scope)
return node
def visit_CStructOrUnionDefNode(self, node):
if node.name not in self.module_scope.entries:
node.declare(self.module_scope)
return node
def visit_CClassDefNode(self, node):
if node.class_name not in self.module_scope.entries:
node.declare(self.module_scope)
# Expand fused methods of .pxd declared types to construct the final vtable order.
type = self.module_scope.entries[node.class_name].type
if type is not None and type.is_extension_type and not type.is_builtin_type and type.scope:
scope = type.scope
for entry in scope.cfunc_entries:
if entry.type and entry.type.is_fused:
entry.type.get_all_specialized_function_types()
return node
def visit_FuncDefNode(self, node):
# no traversal needed
return node
def visit_PyClassDefNode(self, node):
# no traversal needed
return node
| ForwardDeclareTypes |
python | pypa__pipenv | tests/integration/conftest.py | {
"start": 5654,
"end": 11025
} | class ____:
"""An instance of a Pipenv Project..."""
def __init__(self, pipfile=True, capfd=None, index_url=None):
self.index_url = index_url
self.pypi = None
self.env = {}
self.capfd = capfd
if self.index_url is not None:
self.pypi, _, _ = self.index_url.rpartition("/") if self.index_url else ""
self.env["PYTHONWARNINGS"] = "ignore:DEPRECATION"
os.environ.pop("PIPENV_CUSTOM_VENV_NAME", None)
self.original_dir = Path(__file__).parent.parent.parent
self._path = TemporaryDirectory(prefix="pipenv-", suffix="-tests")
path = Path(self._path.name)
try:
self.path = str(path.resolve())
except OSError:
self.path = str(path.absolute())
os.chdir(self.path)
# set file creation perms
self.pipfile_path = None
p_path = os.sep.join([self.path, "Pipfile"])
self.pipfile_path = Path(p_path)
if pipfile:
with contextlib.suppress(FileNotFoundError):
os.remove(p_path)
with open(p_path, "a"):
os.utime(p_path, None)
self._pipfile = _Pipfile(Path(p_path), index=self.index_url)
else:
self._pipfile = None
def __enter__(self):
return self
def __exit__(self, *args):
if self.pipfile_path:
with contextlib.suppress(OSError):
os.remove(self.pipfile_path)
# Make sure we change back to the original directory before cleanup
os.chdir(self.original_dir)
self.path = None
self._path = None
def run_command(self, cmd):
result = subprocess.run(cmd, shell=True, capture_output=True, check=False)
try:
std_out_decoded = result.stdout.decode("utf-8")
except UnicodeDecodeError:
std_out_decoded = result.stdout
result.stdout = std_out_decoded
try:
std_err_decoded = result.stderr.decode("utf-8")
except UnicodeDecodeError:
std_err_decoded = result.stderr
result.stderr = std_err_decoded
return result
def pipenv(self, cmd, block=True):
self.capfd.readouterr()
r = self.run_command(f"pipenv {cmd}")
# Pretty output for failing tests.
out, err = self.capfd.readouterr()
if out:
r.stdout_bytes = r.stdout_bytes + out
if err:
r.stderr_bytes = r.stderr_bytes + err
if block:
print(f"$ pipenv {cmd}")
print(r.stdout)
print(r.stderr, file=sys.stderr)
if r.returncode != 0:
print("Command failed...")
# Where the action happens.
return r
@property
def pipfile(self):
p_path = os.sep.join([self.path, "Pipfile"])
with open(p_path) as f:
return tomlkit.loads(f.read())
@property
def lockfile(self):
p_path = self.lockfile_path
with open(p_path) as f:
return json.loads(f.read())
@property
def lockfile_path(self):
return Path(os.sep.join([self.path, "Pipfile.lock"]))
if sys.version_info[:2] <= (3, 8):
# Windows python3.8 fails without this patch. Additional details: https://bugs.python.org/issue42796
def _rmtree_func(path, ignore_errors=True, onerror=None):
shutil_rmtree = _rmtree
if onerror is None:
onerror = handle_remove_readonly
try:
shutil_rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
except (OSError, FileNotFoundError, PermissionError) as exc:
# Ignore removal failures where the file doesn't exist
if exc.errno != errno.ENOENT:
raise
else:
_rmtree_func = _rmtree
@pytest.fixture()
def pipenv_instance_pypi(capfdbinary, monkeypatch):
with temp_environ(), monkeypatch.context() as m:
m.setattr(shutil, "rmtree", _rmtree_func)
original_umask = os.umask(0o007)
os.environ["PIPENV_NOSPIN"] = "1"
os.environ["CI"] = "1"
os.environ["PIPENV_DONT_USE_PYENV"] = "1"
warnings.simplefilter("ignore", category=ResourceWarning)
warnings.filterwarnings(
"ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>"
)
try:
yield functools.partial(
_PipenvInstance, capfd=capfdbinary, index_url="https://pypi.org/simple"
)
finally:
os.umask(original_umask)
@pytest.fixture()
def pipenv_instance_private_pypi(capfdbinary, monkeypatch):
with temp_environ(), monkeypatch.context() as m:
m.setattr(shutil, "rmtree", _rmtree_func)
original_umask = os.umask(0o007)
os.environ["PIPENV_NOSPIN"] = "1"
os.environ["CI"] = "1"
os.environ["PIPENV_DONT_USE_PYENV"] = "1"
warnings.simplefilter("ignore", category=ResourceWarning)
warnings.filterwarnings(
"ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>"
)
try:
yield functools.partial(
_PipenvInstance, capfd=capfdbinary, index_url=DEFAULT_PRIVATE_PYPI_SERVER
)
finally:
os.umask(original_umask)
@pytest.fixture()
def testsroot():
return TESTS_ROOT
| _PipenvInstance |
python | getsentry__sentry | tests/sentry/issues/escalating/test_escalating.py | {
"start": 10093,
"end": 15402
} | class ____(BaseGroupCounts):
def save_mock_escalating_group_forecast(
self, group: Group, forecast_values: list[int], date_added: datetime
) -> None:
"""Save mock data for escalating group forecast in nodestore"""
escalating_forecast = EscalatingGroupForecast(
project_id=group.project.id,
group_id=group.id,
forecast=forecast_values,
date_added=date_added,
)
escalating_forecast.save()
def archive_until_escalating(self, group: Group) -> None:
group.status = GroupStatus.IGNORED
group.substatus = GroupSubStatus.UNTIL_ESCALATING
group.save()
@pytest.mark.skip(reason="flaky: #93732")
@freeze_time(TIME_YESTERDAY)
def test_is_escalating_issue(self) -> None:
"""Test when an archived until escalating issue starts escalating"""
# The group had 6 events in the last hour
event = self._create_events_for_group(count=6)
assert event.group is not None
archived_group = event.group
self.archive_until_escalating(archived_group)
# The escalating forecast for today is 5, thus, it should escalate
forecast_values = [5] + [6] * 13
self.save_mock_escalating_group_forecast(
group=archived_group, forecast_values=forecast_values, date_added=datetime.now()
)
assert is_escalating(archived_group) == (True, 5)
# Test cache
assert cache.get(f"hourly-group-count:{archived_group.project.id}:{archived_group.id}") == 6
@freeze_time(TIME_YESTERDAY)
def test_not_escalating_issue(self) -> None:
"""Test when an archived until escalating issue is not escalating"""
# Group 1 had 4 events yesterday
self._create_events_for_group(count=4, hours_ago=24)
# Group 2 had 5 events today
event = self._create_events_for_group(count=5, group="group-escalating")
assert event.group is not None
group = event.group
self.archive_until_escalating(group)
# The escalating forecast for today is 6 (since date_added was one day ago)
forecast_values = [5] + [6] * 13
self.save_mock_escalating_group_forecast(
group=group,
forecast_values=forecast_values,
date_added=datetime.now() - timedelta(days=1),
)
assert is_escalating(group) == (False, None)
assert group.substatus == GroupSubStatus.UNTIL_ESCALATING
assert group.status == GroupStatus.IGNORED
assert not GroupInbox.objects.filter(group=group).exists()
@freeze_time(TIME_YESTERDAY.replace(minute=12, second=40, microsecond=0))
def test_hourly_count_query(self) -> None:
"""Test the hourly count query only aggregates events from within the current hour"""
self._create_events_for_group(count=2, hours_ago=1) # An hour ago -> It will not count
group = self._create_events_for_group(count=1).group # This hour -> It will count
assert group is not None
# Events are aggregated in the hourly count query by date rather than the last 24hrs
assert get_group_hourly_count(group) == 1
@freeze_time(TIME_YESTERDAY)
def test_is_forecast_out_of_range(self) -> None:
"""
Test that when an archived until escalating issue does not have a forecast that is in range,
the last forecast is used as a fallback and an error is reported
"""
with patch("sentry.issues.escalating.escalating_group_forecast.logger") as logger:
event = self._create_events_for_group(count=2)
assert event.group is not None
archived_group = event.group
self.archive_until_escalating(archived_group)
# The escalating forecast was added 15 days ago, and thus is out of the 14 day range
forecast_values = [10] * 13 + [1]
self.save_mock_escalating_group_forecast(
group=archived_group,
forecast_values=forecast_values,
date_added=datetime.now() - timedelta(15),
)
assert is_escalating(archived_group) == (True, 1)
logger.error.assert_called_once()
@pytest.mark.skip(reason="flaky: #94622")
@freeze_time(TIME_YESTERDAY)
def test_is_escalating_two_weeks(self) -> None:
"""
Test when an archived until escalating issue starts escalating after exactly 2 weeks.
This can happen when the previous nodestore forecast hasn't expired yet.
"""
# The group had 6 events in the last hour
event = self._create_events_for_group(count=6)
assert event.group is not None
archived_group = event.group
self.archive_until_escalating(archived_group)
# The escalating forecast for today is 5, thus, it should escalate
forecast_values = [5] * 14
self.save_mock_escalating_group_forecast(
group=archived_group,
forecast_values=forecast_values,
date_added=TIME_YESTERDAY - timedelta(days=14),
)
assert is_escalating(archived_group) == (True, 5)
# Test cache
assert cache.get(f"hourly-group-count:{archived_group.project.id}:{archived_group.id}") == 6
| DailyGroupCountsEscalating |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_resolved_email.py | {
"start": 217,
"end": 384
} | class ____(ActivityMailDebugView):
def get_activity(self, request: HttpRequest, event):
return {"type": ActivityType.SET_RESOLVED.value}
| DebugResolvedEmailView |
python | getsentry__sentry | tests/sentry/tempest/endpoints/test_tempest_credentials_details.py | {
"start": 146,
"end": 2482
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-tempest-credentials-details"
def setUp(self) -> None:
super().setUp()
self.tempest_credentials = self.create_tempest_credentials(self.project)
def test_cant_access_endpoint_if_feature_flag_is_disabled(self) -> None:
self.login_as(self.user)
response = self.get_response(
self.project.organization.slug,
self.project.slug,
self.tempest_credentials.id,
method="DELETE",
)
assert response.status_code == 404
def test_cant_access_endpoint_if_user_is_not_authenticated(self) -> None:
response = self.get_response(
self.project.organization.slug,
self.project.slug,
self.tempest_credentials.id,
method="DELETE",
)
assert response.status_code == 401
@patch(
"sentry.tempest.endpoints.tempest_credentials_details.TempestCredentialsDetailsEndpoint.create_audit_entry"
)
def test_delete_tempest_credentials_as_org_admin(self, create_audit_entry: MagicMock) -> None:
self.organization.update_option("sentry:enabled_console_platforms", ["playstation"])
self.login_as(self.user)
response = self.get_response(
self.project.organization.slug,
self.project.slug,
self.tempest_credentials.id,
method="DELETE",
)
assert response.status_code == 204
assert not TempestCredentials.objects.filter(id=self.tempest_credentials.id).exists()
create_audit_entry.assert_called()
def test_non_admin_cant_delete_credentials(self) -> None:
non_admin_user = self.create_user()
self.create_member(
user=non_admin_user, organization=self.project.organization, role="member"
)
self.organization.update_option("sentry:enabled_console_platforms", ["playstation"])
self.login_as(non_admin_user)
response = self.get_response(
self.project.organization.slug,
self.project.slug,
self.tempest_credentials.id,
method="DELETE",
)
assert response.status_code == 403
assert TempestCredentials.objects.filter(id=self.tempest_credentials.id).exists()
| TestTempestCredentialsDetails |
python | PrefectHQ__prefect | tests/utilities/test_annotations.py | {
"start": 160,
"end": 351
} | class ____:
def test_always_returns_same_value(self):
thing = unmapped("hello")
for _ in range(10):
assert thing[random.randint(0, 100)] == "hello"
| TestUnmapped |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 72142,
"end": 72490
} | class ____(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
actions: Annotated[
list[
BulkCreateActionBulkTaskInstanceBody
| BulkUpdateActionBulkTaskInstanceBody
| BulkDeleteActionBulkTaskInstanceBody
],
Field(title="Actions"),
]
| BulkBodyBulkTaskInstanceBody |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 141932,
"end": 143318
} | class ____:
def test_ordinal_number(self):
assert self.locale.ordinal_number(1) == "1."
def test_define(self):
assert self.locale.describe("minute", only_distance=True) == "eng Minutt"
assert self.locale.describe("minute", only_distance=False) == "an enger Minutt"
assert self.locale.describe("hour", only_distance=True) == "eng Stonn"
assert self.locale.describe("hour", only_distance=False) == "an enger Stonn"
assert self.locale.describe("day", only_distance=True) == "een Dag"
assert self.locale.describe("day", only_distance=False) == "an engem Dag"
assert self.locale.describe("week", only_distance=True) == "eng Woch"
assert self.locale.describe("week", only_distance=False) == "an enger Woch"
assert self.locale.describe("month", only_distance=True) == "ee Mount"
assert self.locale.describe("month", only_distance=False) == "an engem Mount"
assert self.locale.describe("year", only_distance=True) == "ee Joer"
assert self.locale.describe("year", only_distance=False) == "an engem Joer"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "Samschdeg"
assert self.locale.day_abbreviation(dt.isoweekday()) == "Sam"
@pytest.mark.usefixtures("lang_locale")
| TestLuxembourgishLocale |
python | ZoranPandovski__al-go-rithms | data_structures/Linked_list/Python/Odd_Even_Linked_List.py | {
"start": 166,
"end": 709
} | class ____(object):
def oddEvenList(self, head):
if head is None: return None
if head.next is None: return head
o = head
p = o.next
ehead = p
while p.next is not None:
o.next = p.next
p.next = p.next.next
o = o.next
p = p.next
if p is None: break
o.next = ehead
return head
'''
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
----------------------
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
'''
| Solution |
python | getsentry__sentry | src/sentry/snuba/events.py | {
"start": 294,
"end": 30145
} | class ____(Enum):
"""
Value is a tuple of (internal Events name, internal Transaction name, internal
Discover name, external alias)
None means the column is not available in that dataset.
Always use keyword arguments to declare columns for legibility.
"""
EVENT_ID = Column(
group_name="events.event_id",
event_name="event_id",
transaction_name="event_id",
discover_name="event_id",
issue_platform_name="event_id",
alias="id",
)
GROUP_ID = Column(
group_name="events.group_id",
event_name="group_id",
transaction_name=None,
discover_name="group_id",
issue_platform_name="group_id",
alias="issue.id",
)
ISSUE_STATUS = Column(
group_name="status",
event_name="status",
transaction_name=None,
discover_name=None,
issue_platform_name="status",
alias="status",
)
# This is needed to query transactions by group id
# in the Issue Details page. This will not be
# exposed to users through discover search.
GROUP_IDS = Column(
group_name=None,
event_name="group_ids",
transaction_name="group_ids",
discover_name="group_ids",
alias="performance.issue_ids",
)
OCCURRENCE_ID = Column(
group_name=None,
event_name="occurrence_id",
transaction_name=None,
discover_name=None,
issue_platform_name="occurrence_id",
alias="occurrence_id",
)
OCCURRENCE_TYPE_ID = Column(
group_name=None,
event_name=None,
transaction_name=None,
discover_name=None,
issue_platform_name="occurrence_type_id",
alias="occurrence_type_id",
)
PROJECT_ID = Column(
group_name="events.project_id",
event_name="project_id",
transaction_name="project_id",
discover_name="project_id",
issue_platform_name="project_id",
alias="project.id",
)
TIMESTAMP = Column(
group_name="events.timestamp",
event_name="timestamp",
transaction_name="finish_ts",
discover_name="timestamp",
issue_platform_name="timestamp",
alias="timestamp",
)
TIME = Column(
group_name="events.time",
event_name="time",
transaction_name="bucketed_end",
discover_name="time",
alias="time",
)
CULPRIT = Column(
group_name="events.culprit",
event_name="culprit",
transaction_name=None,
discover_name="culprit",
issue_platform_name="culprit",
alias="culprit",
)
LOCATION = Column(
group_name="events.location",
event_name="location",
transaction_name=None,
discover_name="location",
issue_platform_name=None,
alias="location",
)
MESSAGE = Column(
group_name="events.message",
event_name="message",
transaction_name="transaction_name",
discover_name="message",
issue_platform_name="message",
alias="message",
)
PLATFORM = Column(
group_name="events.platform",
event_name="platform",
transaction_name="platform",
discover_name="platform",
issue_platform_name="platform",
alias="platform.name",
)
ENVIRONMENT = Column(
group_name="events.environment",
event_name="environment",
transaction_name="environment",
discover_name="environment",
issue_platform_name="environment",
alias="environment",
)
RELEASE = Column(
group_name="events.tags[sentry:release]",
event_name="tags[sentry:release]",
transaction_name="release",
discover_name="release",
issue_platform_name="release",
alias="release",
)
DIST = Column(
group_name="events.tags[sentry:dist]",
event_name="tags[sentry:dist]",
transaction_name="dist",
discover_name="dist",
issue_platform_name="dist",
alias="dist",
)
TITLE = Column(
group_name="events.title",
event_name="title",
transaction_name="transaction_name",
discover_name="title",
issue_platform_name="search_title",
alias="title",
)
SUBTITLE = Column(
group_name=None,
event_name=None,
transaction_name=None,
discover_name=None,
issue_platform_name="subtitle",
alias="subtitle",
)
TYPE = Column(
group_name="events.type",
event_name="type",
transaction_name=None,
discover_name="type",
alias="event.type",
)
TAGS_KEY = Column(
group_name="events.tags.key",
event_name="tags.key",
transaction_name="tags.key",
discover_name="tags.key",
issue_platform_name="tags.key",
alias="tags.key",
)
TAGS_VALUE = Column(
group_name="events.tags.value",
event_name="tags.value",
transaction_name="tags.value",
discover_name="tags.value",
issue_platform_name="tags.value",
alias="tags.value",
)
TAGS_KEYS = Column(
group_name="events.tags_key",
event_name="tags_key",
transaction_name="tags_key",
discover_name="tags_key",
issue_platform_name="tags_key",
alias="tags_key",
)
TAGS_VALUES = Column(
group_name="events.tags_value",
event_name="tags_value",
transaction_name="tags_value",
discover_name="tags_value",
issue_platform_name="tags_value",
alias="tags_value",
)
TRANSACTION = Column(
group_name="events.transaction",
event_name="transaction",
transaction_name="transaction_name",
discover_name="transaction",
issue_platform_name="transaction_name",
alias="transaction",
)
USER = Column(
group_name="events.tags[sentry:user]",
event_name="tags[sentry:user]",
transaction_name="user",
discover_name="user",
issue_platform_name="user",
alias="user",
)
USER_ID = Column(
group_name="events.user_id",
event_name="user_id",
transaction_name="user_id",
discover_name="user_id",
issue_platform_name="user_id",
alias="user.id",
)
USER_EMAIL = Column(
group_name="events.email",
event_name="email",
transaction_name="user_email",
discover_name="email",
issue_platform_name="user_email",
alias="user.email",
)
USER_USERNAME = Column(
group_name="events.username",
event_name="username",
transaction_name="user_name",
discover_name="username",
issue_platform_name="user_name",
alias="user.username",
)
USER_IP_ADDRESS = Column(
group_name="events.ip_address",
event_name="ip_address",
transaction_name="ip_address",
discover_name="ip_address",
issue_platform_name="ip_address",
alias="user.ip",
)
USER_DISPLAY = Column(
group_name=None,
event_name=None,
transaction_name=None,
discover_name="user.display",
alias="user.display",
)
SDK_NAME = Column(
group_name="events.sdk_name",
event_name="sdk_name",
transaction_name="sdk_name",
discover_name="sdk_name",
issue_platform_name="sdk_name",
alias="sdk.name",
)
SDK_VERSION = Column(
group_name="events.sdk_version",
event_name="sdk_version",
transaction_name="sdk_version",
discover_name="sdk_version",
issue_platform_name="sdk_version",
alias="sdk.version",
)
UNREAL_CRASH_TYPE = Column(
group_name="events.contexts[unreal.crash_type]",
event_name="contexts[unreal.crash_type]",
transaction_name=None,
discover_name="contexts[unreal.crash_type]",
issue_platform_name="contexts[unreal.crash_type]",
alias="unreal.crash_type",
)
HTTP_METHOD = Column(
group_name="events.http_method",
event_name="http_method",
transaction_name="http_method",
discover_name="http_method",
issue_platform_name="http_method",
alias="http.method",
)
HTTP_REFERER = Column(
group_name="events.http_referer",
event_name="http_referer",
transaction_name="http_referer",
discover_name="http_referer",
issue_platform_name="http_referer",
alias="http.referer",
)
HTTP_URL = Column(
group_name="events.tags[url]",
event_name="tags[url]",
transaction_name="tags[url]",
discover_name="tags[url]",
issue_platform_name="tags[url]",
alias="http.url",
)
HTTP_STATUS_CODE = Column(
group_name="events.contexts[response.status_code]",
event_name="contexts[response.status_code]",
transaction_name="contexts[response.status_code]",
discover_name="contexts[response.status_code]",
issue_platform_name="contexts[response.status_code]",
alias="http.status_code",
)
OS_BUILD = Column(
group_name="events.contexts[os.build]",
event_name="contexts[os.build]",
transaction_name="contexts[os.build]",
discover_name="contexts[os.build]",
issue_platform_name="contexts[os.build]",
alias="os.build",
)
OS_KERNEL_VERSION = Column(
group_name="events.contexts[os.kernel_version]",
event_name="contexts[os.kernel_version]",
transaction_name="contexts[os.kernel_version]",
discover_name="contexts[os.kernel_version]",
issue_platform_name="contexts[os.kernel_version]",
alias="os.kernel_version",
)
DEVICE_ARCH = Column(
group_name="events.contexts[device.arch]",
event_name="contexts[device.arch]",
transaction_name="contexts[device.arch]",
discover_name="contexts[device.arch]",
issue_platform_name="contexts[device.arch]",
alias="device.arch",
)
DEVICE_BATTERY_LEVEL = Column(
group_name="events.contexts[device.battery_level]",
event_name="contexts[device.battery_level]",
transaction_name="contexts[device.battery_level]",
discover_name="contexts[device.battery_level]",
issue_platform_name="contexts[device.battery_level]",
alias="device.battery_level",
)
DEVICE_BRAND = Column(
group_name="events.contexts[device.brand]",
event_name="contexts[device.brand]",
transaction_name="contexts[device.brand]",
discover_name="contexts[device.brand]",
issue_platform_name="contexts[device.brand]",
alias="device.brand",
)
DEVICE_CHARGING = Column(
group_name="events.contexts[device.charging]",
event_name="contexts[device.charging]",
transaction_name="contexts[device.charging]",
discover_name="contexts[device.charging]",
issue_platform_name="contexts[device.charging]",
alias="device.charging",
)
DEVICE_LOCALE = Column(
group_name="events.contexts[device.locale]",
event_name="contexts[device.locale]",
transaction_name="contexts[device.locale]",
discover_name="contexts[device.locale]",
issue_platform_name="contexts[device.locale]",
alias="device.locale",
)
DEVICE_MODEL_ID = Column(
group_name="events.contexts[device.model_id]",
event_name="contexts[device.model_id]",
transaction_name="contexts[device.model_id]",
discover_name="contexts[device.model_id]",
issue_platform_name="contexts[device.model_id]",
alias="device.model_id",
)
DEVICE_NAME = Column(
group_name="events.contexts[device.name]",
event_name="contexts[device.name]",
transaction_name="contexts[device.name]",
discover_name="contexts[device.name]",
issue_platform_name="contexts[device.name]",
alias="device.name",
)
DEVICE_ONLINE = Column(
group_name="events.contexts[device.online]",
event_name="contexts[device.online]",
transaction_name="contexts[device.online]",
discover_name="contexts[device.online]",
issue_platform_name="contexts[device.online]",
alias="device.online",
)
DEVICE_ORIENTATION = Column(
group_name="events.contexts[device.orientation]",
event_name="contexts[device.orientation]",
transaction_name="contexts[device.orientation]",
discover_name="contexts[device.orientation]",
issue_platform_name="contexts[device.orientation]",
alias="device.orientation",
)
DEVICE_SCREEN_DENSITY = Column(
group_name="events.contexts[device.screen_density]",
event_name="contexts[device.screen_density]",
transaction_name="contexts[device.screen_density]",
discover_name="contexts[device.screen_density]",
issue_platform_name="contexts[device.screen_density]",
alias="device.screen_density",
)
DEVICE_SCREEN_DPI = Column(
group_name="events.contexts[device.screen_dpi]",
event_name="contexts[device.screen_dpi]",
transaction_name="contexts[device.screen_dpi]",
discover_name="contexts[device.screen_dpi]",
issue_platform_name="contexts[device.screen_dpi]",
alias="device.screen_dpi",
)
DEVICE_SCREEN_HEIGHT_PIXELS = Column(
group_name="events.contexts[device.screen_height_pixels]",
event_name="contexts[device.screen_height_pixels]",
transaction_name="contexts[device.screen_height_pixels]",
discover_name="contexts[device.screen_height_pixels]",
issue_platform_name="contexts[device.screen_heigh_pixels]",
alias="device.screen_height_pixels",
)
DEVICE_SCREEN_WIDTH_PIXELS = Column(
group_name="events.contexts[device.screen_width_pixels]",
event_name="contexts[device.screen_width_pixels]",
transaction_name="contexts[device.screen_width_pixels]",
discover_name="contexts[device.screen_width_pixels]",
issue_platform_name="contexts[device.screen_width_pixels]",
alias="device.screen_width_pixels",
)
DEVICE_SIMULATOR = Column(
group_name="events.contexts[device.simulator]",
event_name="contexts[device.simulator]",
transaction_name="contexts[device.simulator]",
discover_name="contexts[device.simulator]",
issue_platform_name="contexts[device.simulator]",
alias="device.simulator",
)
DEVICE_UUID = Column(
group_name="events.contexts[device.uuid]",
event_name="contexts[device.uuid]",
transaction_name="contexts[device.uuid]",
discover_name="contexts[device.uuid]",
issue_platform_name="contexts[device.uuid]",
alias="device.uuid",
)
GEO_COUNTRY_CODE = Column(
group_name="events.geo_country_code",
event_name="geo_country_code",
transaction_name="contexts[geo.country_code]",
discover_name="geo_country_code",
issue_platform_name="contexts[geo.country_code]",
alias="geo.country_code",
)
GEO_REGION = Column(
group_name="events.geo_region",
event_name="geo_region",
transaction_name="contexts[geo.region]",
discover_name="geo_region",
issue_platform_name="contexts[geo.region]",
alias="geo.region",
)
GEO_CITY = Column(
group_name="events.geo_city",
event_name="geo_city",
transaction_name="contexts[geo.city]",
discover_name="geo_city",
issue_platform_name="contexts[geo.city]",
alias="geo.city",
)
GEO_SUBDIVISION = Column(
group_name="events.geo_subdivision",
event_name="geo_subdivision",
transaction_name="contexts[geo.subdivision]",
discover_name="geo_subdivision",
issue_platform_name="contexts[geo.subdivision]",
alias="geo.subdivision",
)
ERROR_TYPE = Column(
group_name="events.exception_stacks.type",
event_name="exception_stacks.type",
transaction_name=None,
discover_name="exception_stacks.type",
alias="error.type",
)
ERROR_VALUE = Column(
group_name="events.exception_stacks.value",
event_name="exception_stacks.value",
transaction_name=None,
discover_name="exception_stacks.value",
alias="error.value",
)
ERROR_MECHANISM = Column(
group_name="events.exception_stacks.mechanism_type",
event_name="exception_stacks.mechanism_type",
transaction_name=None,
discover_name="exception_stacks.mechanism_type",
alias="error.mechanism",
)
ERROR_HANDLED = Column(
group_name="events.exception_stacks.mechanism_handled",
event_name="exception_stacks.mechanism_handled",
transaction_name=None,
discover_name="exception_stacks.mechanism_handled",
alias="error.handled",
)
ERROR_MAIN_THREAD = Column(
group_name="events.exception_main_thread",
event_name="exception_main_thread",
transaction_name=None,
discover_name="exception_main_thread",
issue_platform_name=None,
alias="error.main_thread",
)
ERROR_RECEIVED = Column(
group_name=None,
event_name="received",
transaction_name=None,
discover_name="received",
issue_platform_name="receive_timestamp",
alias="error.received",
)
STACK_ABS_PATH = Column(
group_name="events.exception_frames.abs_path",
event_name="exception_frames.abs_path",
transaction_name=None,
discover_name="exception_frames.abs_path",
alias="stack.abs_path",
)
STACK_FILENAME = Column(
group_name="events.exception_frames.filename",
event_name="exception_frames.filename",
transaction_name=None,
discover_name="exception_frames.filename",
alias="stack.filename",
)
STACK_PACKAGE = Column(
group_name="events.exception_frames.package",
event_name="exception_frames.package",
transaction_name=None,
discover_name="exception_frames.package",
alias="stack.package",
)
STACK_MODULE = Column(
group_name="events.exception_frames.module",
event_name="exception_frames.module",
transaction_name=None,
discover_name="exception_frames.module",
alias="stack.module",
)
STACK_FUNCTION = Column(
group_name="events.exception_frames.function",
event_name="exception_frames.function",
transaction_name=None,
discover_name="exception_frames.function",
alias="stack.function",
)
STACK_IN_APP = Column(
group_name="events.exception_frames.in_app",
event_name="exception_frames.in_app",
transaction_name=None,
discover_name="exception_frames.in_app",
alias="stack.in_app",
)
STACK_COLNO = Column(
group_name="events.exception_frames.colno",
event_name="exception_frames.colno",
transaction_name=None,
discover_name="exception_frames.colno",
alias="stack.colno",
)
STACK_LINENO = Column(
group_name="events.exception_frames.lineno",
event_name="exception_frames.lineno",
transaction_name=None,
discover_name="exception_frames.lineno",
alias="stack.lineno",
)
STACK_STACK_LEVEL = Column(
group_name="events.exception_frames.stack_level",
event_name="exception_frames.stack_level",
transaction_name=None,
discover_name="exception_frames.stack_level",
alias="stack.stack_level",
)
CONTEXTS_KEY = Column(
group_name="events.contexts.key",
event_name="contexts.key",
transaction_name="contexts.key",
discover_name=None,
issue_platform_name="contexts.key",
alias="contexts.key",
)
CONTEXTS_VALUE = Column(
group_name="events.contexts.value",
event_name="contexts.value",
transaction_name="contexts.value",
discover_name=None,
issue_platform_name="contexts.value",
alias="contexts.value",
)
APP_IN_FOREGROUND = Column(
group_name="events.contexts[app.in_foreground]",
event_name="contexts[app.in_foreground]",
transaction_name="contexts[app.in_foreground]",
discover_name="contexts[app.in_foreground]",
issue_platform_name="contexts[app.in_foreground]",
alias="app.in_foreground",
)
OS_DISTRIBUTION_NAME = Column(
group_name="events.contexts[os.distribution_name]",
event_name="contexts[os.distribution_name]",
transaction_name="contexts[os.distribution_name]",
discover_name="contexts[os.distribution_name]",
issue_platform_name="contexts[os.distribution_name]",
alias="os.distribution_name",
)
OS_DISTRIBUTION_VERSION = Column(
group_name="events.contexts[os.distribution_version]",
event_name="contexts[os.distribution_version]",
transaction_name="contexts[os.distribution_version]",
discover_name="contexts[os.distribution_version]",
issue_platform_name="contexts[os.distribution_version]",
alias="os.distribution_version",
)
# Transactions specific columns
TRANSACTION_OP = Column(
group_name=None,
event_name=None,
transaction_name="transaction_op",
discover_name="transaction_op",
alias="transaction.op",
)
TRANSACTION_DURATION = Column(
group_name=None,
event_name=None,
transaction_name="duration",
discover_name="duration",
issue_platform_name="transaction_duration",
alias="transaction.duration",
)
# span.duration is here to help migrate the frontend to EAP
SPAN_DURATION = Column(
group_name=None,
event_name=None,
transaction_name="duration",
discover_name="duration",
alias="span.duration",
)
TRANSACTION_STATUS = Column(
group_name=None,
event_name=None,
transaction_name="transaction_status",
discover_name="transaction_status",
alias="transaction.status",
)
TRANSACTION_SOURCE = Column(
group_name=None,
event_name=None,
transaction_name="transaction_source",
discover_name="transaction_source",
alias="transaction.source",
)
MEASUREMENTS_KEYS = Column(
group_name=None,
event_name=None,
transaction_name="measurements.key",
discover_name="measurements.key",
spans_name="measurements.key",
alias="measurements_key",
)
MEASUREMENTS_VALUES = Column(
group_name=None,
event_name=None,
transaction_name="measurements.value",
discover_name="measurements.value",
spans_name="measurements.value",
alias="measurements_value",
)
SPAN_OP_BREAKDOWNS_KEYS = Column(
group_name=None,
event_name=None,
transaction_name="span_op_breakdowns.key",
discover_name="span_op_breakdowns.key",
alias="span_op_breakdowns_key",
)
SPAN_OP_BREAKDOWNS_VALUES = Column(
group_name=None,
event_name=None,
transaction_name="span_op_breakdowns.value",
discover_name="span_op_breakdowns.value",
alias="span_op_breakdowns_value",
)
SPANS_OP = Column(
group_name=None,
event_name=None,
transaction_name="spans.op",
discover_name="spans.op",
alias="spans_op",
)
SPANS_GROUP = Column(
group_name=None,
event_name=None,
transaction_name="spans.group",
discover_name="spans.group",
alias="spans_group",
)
SPANS_EXCLUSIVE_TIME = Column(
group_name=None,
event_name=None,
transaction_name="spans.exclusive_time",
discover_name="spans.exclusive_time",
alias="spans_exclusive_time",
)
# Tracing context fields.
TRACE_ID = Column(
group_name="events.contexts[trace.trace_id]",
event_name="contexts[trace.trace_id]",
transaction_name="trace_id",
discover_name="contexts[trace.trace_id]",
issue_platform_name="trace_id",
alias="trace",
)
SPAN_ID = Column(
group_name="events.contexts[trace.span_id]",
event_name="contexts[trace.span_id]",
transaction_name="span_id",
discover_name="span_id",
alias="trace.span",
)
PARENT_SPAN_ID = Column(
group_name=None,
event_name=None,
transaction_name="contexts[trace.parent_span_id]",
discover_name="contexts[trace.parent_span_id]",
alias="trace.parent_span",
)
TRACE_CLIENT_SAMPLE_RATE = Column(
group_name="events.contexts[trace.client_sample_rate]",
event_name="contexts[trace.client_sample_rate]",
transaction_name="contexts[trace.client_sample_rate]",
discover_name="contexts[trace.client_sample_rate]",
issue_platform_name="contexts[trace.client_sample_rate]",
alias="trace.client_sample_rate",
)
# Reprocessing context
REPROCESSING_ORIGINAL_GROUP_ID = Column(
group_name="events.contexts[reprocessing.original_issue_id]",
event_name="contexts[reprocessing.original_issue_id]",
transaction_name="contexts[reprocessing.original_issue_id]",
discover_name="contexts[reprocessing.original_issue_id]",
alias="reprocessing.original_issue_id",
)
APP_START_TYPE = Column(
group_name=None,
event_name=None,
transaction_name="app_start_type",
discover_name="app_start_type",
alias="app_start_type",
)
# For transaction profiles
PROFILE_ID = Column(
group_name=None,
event_name=None,
transaction_name="profile_id",
discover_name="profile_id",
issue_platform_name="profile_id",
alias="profile.id",
)
# For continuous profiles
PROFILER_ID = Column(
group_name=None,
event_name=None,
transaction_name="profiler_id",
discover_name="profiler_id",
issue_platform_name=None, # TODO: This doesn't exist yet
alias="profiler.id",
)
THREAD_ID = Column(
group_name=None,
event_name=None,
transaction_name="contexts[trace.thread_id]",
discover_name="contexts[trace.thread_id]",
issue_platform_name=None,
alias="thread.id",
)
REPLAY_ID = Column(
group_name=None,
event_name="replay_id",
transaction_name="replay_id",
discover_name="replay_id",
issue_platform_name="replay_id",
alias="replay.id",
)
# We used to set the replay_id as a tag on error events as
# replayId. We allow this query for backwards compatibility,
# but in the future shouldn't be displayed in the UI anywhere
# as a suggested column.
REPLAY_ID_DEPRECATED = Column(
group_name=None,
event_name="replay_id",
transaction_name="replay_id",
discover_name="replay_id",
issue_platform_name="replay_id",
alias="replayId",
)
TRACE_SAMPLED = Column(
group_name=None,
event_name="trace_sampled",
transaction_name=None,
discover_name=None,
issue_platform_name=None,
alias="trace.sampled",
)
NUM_PROCESSING_ERRORS = Column(
group_name=None,
event_name="num_processing_errors",
transaction_name=None,
discover_name=None,
issue_platform_name=None,
alias="num_processing_errors",
)
SYMBOLICATED_IN_APP = Column(
group_name="events.symbolicated_in_app",
event_name="symbolicated_in_app",
transaction_name=None,
discover_name="symbolicated_in_app",
issue_platform_name=None,
alias="symbolicated_in_app",
)
OTA_UPDATES_CHANNEL = Column(
group_name="events.contexts[ota_updates.channel]",
event_name="contexts[ota_updates.channel]",
transaction_name="contexts[ota_updates.channel]",
discover_name="contexts[ota_updates.channel]",
issue_platform_name="contexts[ota_updates.channel]",
alias="ota_updates.channel",
)
OTA_UPDATES_RUNTIME_VERSION = Column(
group_name="events.contexts[ota_updates.runtime_version]",
event_name="contexts[ota_updates.runtime_version]",
transaction_name="contexts[ota_updates.runtime_version]",
discover_name="contexts[ota_updates.runtime_version]",
alias="ota_updates.runtime_version",
)
OTA_UPDATES_UPDATE_ID = Column(
group_name="events.contexts[ota_updates.update_id]",
event_name="contexts[ota_updates.update_id]",
transaction_name="contexts[ota_updates.update_id]",
discover_name="contexts[ota_updates.update_id]",
alias="ota_updates.update_id",
)
TIMESTAMP_MS = Column(
group_name="events.timestamp_ms",
event_name="timestamp_ms",
transaction_name=None,
discover_name="timestamp_ms",
issue_platform_name="timestamp_ms",
alias="timestamp_ms",
)
GROUP_FIRST_SEEN = Column(
group_name="events.group_first_seen",
event_name="group_first_seen",
transaction_name=None,
discover_name=None,
issue_platform_name="group_first_seen",
alias="group_first_seen",
)
| Columns |
python | mlflow__mlflow | tests/models/test_artifacts.py | {
"start": 717,
"end": 4402
} | class ____:
def __init__(self):
self.test = 1
@pytest.mark.parametrize(
("is_file", "artifact", "artifact_type", "ext"),
[
(True, lambda path: Figure().savefig(path), ImageEvaluationArtifact, "png"),
(True, lambda path: Figure().savefig(path), ImageEvaluationArtifact, "jpg"),
(True, lambda path: Figure().savefig(path), ImageEvaluationArtifact, "jpeg"),
(True, __generate_dummy_json_file, JsonEvaluationArtifact, "json"),
(True, lambda path: pathlib.Path(path).write_text("test"), TextEvaluationArtifact, "txt"),
(
True,
lambda path: np.save(path, np.array([1, 2, 3]), allow_pickle=False),
NumpyEvaluationArtifact,
"npy",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_csv(path, index=False),
CsvEvaluationArtifact,
"csv",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_parquet(path),
ParquetEvaluationArtifact,
"parquet",
),
(False, pd.DataFrame({"test": [1, 2, 3]}), CsvEvaluationArtifact, "csv"),
(False, np.array([1, 2, 3]), NumpyEvaluationArtifact, "npy"),
(False, Figure(), ImageEvaluationArtifact, "png"),
(False, {"a": 1, "b": "e", "c": 1.2, "d": [1, 2]}, JsonEvaluationArtifact, "json"),
(False, [1, 2, 3, "test"], JsonEvaluationArtifact, "json"),
(False, '{"a": 1, "b": [1.2, 3]}', JsonEvaluationArtifact, "json"),
(False, '[1, 2, 3, "test"]', JsonEvaluationArtifact, "json"),
(False, __DummyClass(), PickleEvaluationArtifact, "pickle"),
],
)
def test_infer_artifact_type_and_ext(is_file, artifact, artifact_type, ext, tmp_path, cm_fn_tuple):
if is_file:
artifact_representation = tmp_path / f"test.{ext}"
artifact(artifact_representation)
else:
artifact_representation = artifact
inferred_from_path, inferred_type, inferred_ext = _infer_artifact_type_and_ext(
f"{ext}_{artifact_type.__name__}_artifact", artifact_representation, cm_fn_tuple
)
assert not is_file ^ inferred_from_path
assert inferred_type is artifact_type
assert inferred_ext == f".{ext}"
def test_infer_artifact_type_and_ext_raise_exception_for_non_file_non_json_str(cm_fn_tuple):
with pytest.raises(
MlflowException,
match="with string representation 'some random str' that is "
"neither a valid path to a file nor a JSON string",
):
_infer_artifact_type_and_ext("test_artifact", "some random str", cm_fn_tuple)
def test_infer_artifact_type_and_ext_raise_exception_for_non_existent_path(tmp_path, cm_fn_tuple):
path = tmp_path / "does_not_exist_path"
with pytest.raises(MlflowException, match=f"with path '{path}' does not exist"):
_infer_artifact_type_and_ext("test_artifact", path, cm_fn_tuple)
def test_infer_artifact_type_and_ext_raise_exception_for_non_file_artifact(tmp_path, cm_fn_tuple):
with pytest.raises(MlflowException, match=f"with path '{tmp_path}' is not a file"):
_infer_artifact_type_and_ext("non_file_artifact", tmp_path, cm_fn_tuple)
def test_infer_artifact_type_and_ext_raise_exception_for_unsupported_ext(tmp_path, cm_fn_tuple):
path = tmp_path / "invalid_ext_example.some_ext"
with open(path, "w") as f:
f.write("some stuff that shouldn't be read")
with pytest.raises(
MlflowException,
match=f"with path '{path}' does not match any of the supported file extensions",
):
_infer_artifact_type_and_ext("invalid_ext_artifact", path, cm_fn_tuple)
| __DummyClass |
python | protocolbuffers__protobuf | python/google/protobuf/text_format.py | {
"start": 1909,
"end": 2510
} | class ____(Error):
"""Thrown in case of text parsing or tokenizing error."""
def __init__(self, message=None, line=None, column=None):
if message is not None and line is not None:
loc = str(line)
if column is not None:
loc += ':{0}'.format(column)
message = '{0} : {1}'.format(loc, message)
if message is not None:
super(ParseError, self).__init__(message)
else:
super(ParseError, self).__init__()
self._line = line
self._column = column
def GetLine(self):
return self._line
def GetColumn(self):
return self._column
| ParseError |
python | PyCQA__pylint | tests/regrtest_data/special_attr_scope_lookup_crash.py | {
"start": 0,
"end": 52
} | class ____(object):
"""A"""
__doc__ += "B"
| Klass |
python | openai__openai-python | src/openai/types/beta/realtime/transcription_session_update.py | {
"start": 6344,
"end": 6694
} | class ____(BaseModel):
session: Session
"""Realtime transcription session object configuration."""
type: Literal["transcription_session.update"]
"""The event type, must be `transcription_session.update`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| TranscriptionSessionUpdate |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 5084,
"end": 5317
} | class ____(graphene.ObjectType):
codeReferences = non_null_list(GrapheneSourceLocation)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "CodeReferencesMetadataEntry"
| GrapheneCodeReferencesMetadataEntry |
python | facebookresearch__faiss | tests/test_fast_scan_ivf.py | {
"start": 13280,
"end": 13338
} | class ____(TestIVFImplem12):
IMPLEM = 15
| TestIVFImplem15 |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0094_auto_20221221_1045.py | {
"start": 182,
"end": 1066
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0093_migrate_null_fields"),
]
operations = [
migrations.AlterField(
model_name="projectrelationship",
name="child",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="superprojects",
to="projects.project",
verbose_name="Subproject",
),
),
migrations.AlterField(
model_name="projectrelationship",
name="parent",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subprojects",
to="projects.project",
verbose_name="Main project",
),
),
]
| Migration |
python | paramiko__paramiko | paramiko/server.py | {
"start": 25190,
"end": 26562
} | class ____:
"""
A query (set of prompts) for a user during interactive authentication.
"""
def __init__(self, name="", instructions="", *prompts):
"""
Create a new interactive query to send to the client. The name and
instructions are optional, but are generally displayed to the end
user. A list of prompts may be included, or they may be added via
the `add_prompt` method.
:param str name: name of this query
:param str instructions:
user instructions (usually short) about this query
:param str prompts: one or more authentication prompts
"""
self.name = name
self.instructions = instructions
self.prompts = []
for x in prompts:
if isinstance(x, str):
self.add_prompt(x)
else:
self.add_prompt(x[0], x[1])
def add_prompt(self, prompt, echo=True):
"""
Add a prompt to this query. The prompt should be a (reasonably short)
string. Multiple prompts can be added to the same query.
:param str prompt: the user prompt
:param bool echo:
``True`` (default) if the user's response should be echoed;
``False`` if not (for a password or similar)
"""
self.prompts.append((prompt, echo))
| InteractiveQuery |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 16775,
"end": 19014
} | class ____(BuiltinClass):
"""Implementation of builtins.object."""
_NAME = "object"
def is_object_new(self, func):
"""Whether the given function is object.__new__.
Args:
func: A function.
Returns:
True if func equals either of the pytd definitions for object.__new__,
False otherwise.
"""
self.load_lazy_attribute("__new__")
self.load_lazy_attribute("__new__extra_args")
return [func] == self.members["__new__"].data or [func] == self.members[
"__new__extra_args"
].data
def _has_own(self, node, cls, method):
"""Whether a class has its own implementation of a particular method.
Args:
node: The current node.
cls: An abstract.Class.
method: The method name. So that we don't have to handle the cases when
the method doesn't exist, we only support "__new__" and "__init__".
Returns:
True if the class's definition of the method is different from the
definition in builtins.object, False otherwise.
"""
assert method in ("__new__", "__init__")
if not isinstance(cls, abstract.Class):
return False
self.load_lazy_attribute(method)
obj_method = self.members[method]
_, cls_method = self.ctx.attribute_handler.get_attribute(node, cls, method)
return obj_method.data != cls_method.data
def get_special_attribute(self, node, name, valself):
# Based on the definitions of object_init and object_new in
# cpython/Objects/typeobject.c (https://goo.gl/bTEBRt). It is legal to pass
# extra arguments to object.__new__ if the calling class overrides
# object.__init__, and vice versa.
if valself and not abstract_utils.equivalent_to(valself, self):
val = valself.data
if name == "__new__" and self._has_own(node, val, "__init__"):
self.load_lazy_attribute("__new__extra_args")
return self.members["__new__extra_args"]
elif (
name == "__init__"
and isinstance(val, abstract.Instance)
and self._has_own(node, val.cls, "__new__")
):
self.load_lazy_attribute("__init__extra_args")
return self.members["__init__extra_args"]
return super().get_special_attribute(node, name, valself)
| Object |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/__init__.py | {
"start": 4206,
"end": 4535
} | class ____:
def __init__(self):
"""Create a new `CustomIter`."""
self.values = range(10)
def __iter__(self):
"""Iterate squares of each value."""
for i in self.values:
yield i**2
def snafucate(self):
"""Makes this snafucated."""
print('snafucated')
| CustomIter |
python | numba__numba | numba/tests/test_listimpl.py | {
"start": 292,
"end": 5294
} | class ____(object):
"""A wrapper around the C-API to provide a minimal list object for
testing.
"""
def __init__(self, tc, item_size, allocated):
"""
Parameters
----------
tc : TestCase instance
item_size : int
byte size for the items
allocated : int
number of items to allocate for
"""
self.tc = tc
self.item_size = item_size
self.lp = self.list_new(item_size, allocated)
# The following methods implement part of the list API
def __del__(self):
self.tc.numba_list_free(self.lp)
def __len__(self):
return self.list_length()
def __setitem__(self, i, item):
return self.list_setitem(i, item)
def __getitem__(self, i):
return self.list_getitem(i)
def __iter__(self):
return ListIter(self)
def __delitem__(self, i):
self.list_delitem(i)
def handle_index(self, i):
# handling negative indices is done at the compiler level, so we only
# support -1 to be last element of the list here
if i < -1 or len(self) == 0:
IndexError("list index out of range")
elif i == -1:
i = len(self) - 1
return i
@property
def allocated(self):
return self.list_allocated()
@property
def is_mutable(self):
return self.list_is_mutable()
def set_mutable(self):
return self.list_set_is_mutable(1)
def set_immutable(self):
return self.list_set_is_mutable(0)
def append(self, item):
self.list_append(item)
def pop(self, i=-1):
return self.list_pop(i)
# The methods below are higher-level wrappers for the C-API wrappers
def list_new(self, item_size, allocated):
lp = ctypes.c_void_p()
status = self.tc.numba_list_new(
ctypes.byref(lp), item_size, allocated,
)
self.tc.assertEqual(status, LIST_OK)
return lp
def list_length(self):
return self.tc.numba_list_length(self.lp)
def list_allocated(self):
return self.tc.numba_list_allocated(self.lp)
def list_is_mutable(self):
return self.tc.numba_list_is_mutable(self.lp)
def list_set_is_mutable(self, is_mutable):
return self.tc.numba_list_set_is_mutable(self.lp, is_mutable)
def list_setitem(self, i, item):
status = self.tc.numba_list_setitem(self.lp, i, item)
if status == LIST_ERR_INDEX:
raise IndexError("list index out of range")
elif status == LIST_ERR_IMMUTABLE:
raise ValueError("list is immutable")
else:
self.tc.assertEqual(status, LIST_OK)
def list_getitem(self, i):
i = self.handle_index(i)
item_out_buffer = ctypes.create_string_buffer(self.item_size)
status = self.tc.numba_list_getitem(self.lp, i, item_out_buffer)
if status == LIST_ERR_INDEX:
raise IndexError("list index out of range")
else:
self.tc.assertEqual(status, LIST_OK)
return item_out_buffer.raw
def list_append(self, item):
status = self.tc.numba_list_append(self.lp, item)
if status == LIST_ERR_IMMUTABLE:
raise ValueError("list is immutable")
self.tc.assertEqual(status, LIST_OK)
def list_pop(self, i):
# pop is getitem and delitem
i = self.handle_index(i)
item = self.list_getitem(i)
self.list_delitem(i)
return item
def list_delitem(self, i):
# special case slice
if isinstance(i, slice):
status = self.tc.numba_list_delete_slice(self.lp,
i.start,
i.stop,
i.step)
if status == LIST_ERR_IMMUTABLE:
raise ValueError("list is immutable")
self.tc.assertEqual(status, LIST_OK)
# must be an integer, defer to delitem
else:
i = self.handle_index(i)
status = self.tc.numba_list_delitem(self.lp, i)
if status == LIST_ERR_INDEX:
raise IndexError("list index out of range")
elif status == LIST_ERR_IMMUTABLE:
raise ValueError("list is immutable")
self.tc.assertEqual(status, LIST_OK)
def list_iter(self, itptr):
self.tc.numba_list_iter(itptr, self.lp)
def list_iter_next(self, itptr):
bi = ctypes.c_void_p(0)
status = self.tc.numba_list_iter_next(
itptr, ctypes.byref(bi),
)
if status == LIST_ERR_MUTATED:
raise ValueError('list mutated')
elif status == LIST_ERR_ITER_EXHAUSTED:
raise StopIteration
else:
self.tc.assertGreaterEqual(status, 0)
item = (ctypes.c_char * self.item_size).from_address(bi.value)
return item.value
| List |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py | {
"start": 8914,
"end": 10556
} | class ____(metaclass=abc.ABCMeta):
"""Abstract class for sparse matrix types."""
@abc.abstractmethod
def __init__(self):
self._eager_mode = context.executing_eagerly()
@abc.abstractproperty
def _matrix(self):
pass
@abc.abstractmethod
def _from_matrix(self, matrix, handle_data=None):
pass
@abc.abstractmethod
def to_dense(self):
pass
@abc.abstractmethod
def to_sparse_tensor(self):
pass
@property
def graph(self):
return self._matrix.graph
@property
def shape(self):
return dense_shape_and_type(self._matrix).shape
@property
def dtype(self):
return dense_shape_and_type(self._matrix).dtype
@property
def eager_handle_data(self):
"""Return the matrix's handle data iff in eager mode."""
return _get_handle_data(self._matrix) if self._eager_mode else None
def conj(self):
return self._from_matrix(
math_ops.conj(self._matrix), self.eager_handle_data)
def hermitian_transpose(self):
"""Return the hermitian transpose of the matrix."""
return self._from_matrix(
sm_ops.sparse_matrix_transpose(
self._matrix, conjugate=True, type=self.dtype),
self.eager_handle_data)
def nnz(self):
"""Number of stored values, including explicit zeros."""
return sm_ops.sparse_matrix_nnz(self._matrix)
nonzero = nnz
def sorted_indices(self):
# TODO(ebrevdo): A more efficient implementation?
return self.to_sparse_tensor().indices
def transpose(self):
return self._from_matrix(
sm_ops.sparse_matrix_transpose(self._matrix, type=self.dtype),
self.eager_handle_data)
| SparseMatrix |
python | django__django | tests/auth_tests/test_context_processors.py | {
"start": 2087,
"end": 5762
} | class ____(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
@override_settings(MIDDLEWARE=AUTH_MIDDLEWARE)
def test_session_not_accessed(self):
"""
The session is not accessed simply by including
the auth context processor
"""
response = self.client.get("/auth_processor_no_attr_access/")
self.assertContains(response, "Session not accessed")
@override_settings(MIDDLEWARE=AUTH_MIDDLEWARE)
def test_session_is_accessed(self):
"""
The session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get("/auth_processor_attr_access/")
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username="normal", password="secret")
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename="add_permission",
)
)
self.client.force_login(u)
response = self.client.get("/auth_processor_perms/")
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexistent")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username="normal", password="secret")
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename="add_permission",
)
)
self.client.login(username="normal", password="secret")
response = self.client.get("/auth_processor_perm_in_perms/")
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexistent")
def test_message_attrs(self):
self.client.force_login(self.superuser)
response = self.client.get("/auth_processor_messages/")
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
The lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username="super", password="secret")
user = authenticate(username="super", password="secret")
response = self.client.get("/auth_processor_user/")
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: %d" % self.superuser.pk)
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# A Q() comparing a user and with another Q() (in an AND or OR
# fashion).
Q(user=response.context["user"]) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context["user"], user)
self.assertEqual(user, response.context["user"])
| AuthContextProcessorTests |
python | django__django | tests/no_models/tests.py | {
"start": 70,
"end": 309
} | class ____(SimpleTestCase):
def test_no_models(self):
"""It's possible to load an app with no models.py file."""
app_config = apps.get_app_config("no_models")
self.assertIsNone(app_config.models_module)
| NoModelTests |
python | spyder-ide__spyder | spyder/config/tests/test_user.py | {
"start": 13436,
"end": 14669
} | class ____:
def test_spyderconfig_apply_configuration_patches_42(
self, spyderconfig_patches_42):
# Check that the value is updated
value = spyderconfig_patches_42.get('ipython_console',
'startup/run_lines')
expected_value = 'value1; value2'
assert value == expected_value
def test_spyderconfig_apply_configuration_patches_45(
self, spyderconfig_patches_45):
# Check that the value is not updated
value = spyderconfig_patches_45.get('ipython_console',
'startup/run_lines')
expected_value = 'value1,value2'
assert value == expected_value
def test_spyderconfig_get_defaults_path_name_from_version(spyderconfig):
func = spyderconfig.get_defaults_path_name_from_version
_, name = func('50.0.0')
assert name == 'defaults-50.0.0'
path, name = func('51.0.0')
assert name == 'defaults-spyder-test-51.0.0'
assert path.endswith('defaults')
path, name = func('53.0.0')
assert name == 'defaults-spyder-test-53.0.0'
assert path.endswith('defaults')
if __name__ == "__main__":
pytest.main()
| TestSpyderConfigApplyPatches |
python | walkccc__LeetCode | solutions/739. Daily Temperatures/739.py | {
"start": 0,
"end": 366
} | class ____:
def dailyTemperatures(self, temperatures: list[int]) -> list[int]:
ans = [0] * len(temperatures)
stack = [] # a decreasing stack
for i, temperature in enumerate(temperatures):
while stack and temperature > temperatures[stack[-1]]:
index = stack.pop()
ans[index] = i - index
stack.append(i)
return ans
| Solution |
python | facebook__pyre-check | client/commands/tests/expression_level_coverage_test.py | {
"start": 516,
"end": 26966
} | class ____(testslide.TestCase):
def test_make_expression_level_coverage_response(self) -> None:
self.assertEqual(
expression_level_coverage._make_expression_level_coverage_response(
daemon_query.Response(
{
"response": [
[
"CoverageAtPath",
{
"path": "test.py",
"total_expressions": 7,
"coverage_gaps": [
{
"location": {
"start": {"line": 11, "column": 16},
"stop": {"line": 11, "column": 17},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
],
},
],
]
}
).payload,
),
expression_level_coverage.ExpressionLevelCoverageResponse(
response=[
expression_level_coverage.CoverageAtPathResponse(
CoverageAtPath=expression_level_coverage.CoverageAtPath(
path="test.py",
total_expressions=7,
coverage_gaps=[
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(
line=11, column=16
),
stop=expression_level_coverage.Pair(
line=11, column=17
),
),
function_name=None,
type_="typing.Any",
reason=["TypeIsAny message."],
)
],
)
)
],
),
)
self.assertEqual(
expression_level_coverage._make_expression_level_coverage_response(
daemon_query.Response(
{
"response": [
[
"ErrorAtPath",
{
"path": "test.py",
"error": "Not able to get lookups in: `test.py` (file not found)",
},
],
]
}
).payload,
),
expression_level_coverage.ExpressionLevelCoverageResponse(
response=[
expression_level_coverage.ErrorAtPathResponse(
expression_level_coverage.ErrorAtPath(
path="test.py",
error="Not able to get lookups in: `test.py` (file not found)",
)
)
],
),
)
with self.assertRaises(expression_level_coverage.ErrorParsingFailure):
expression_level_coverage._make_expression_level_coverage_response(
"garbage input"
)
def test_calculate_percent_covered(self) -> None:
self.assertEqual(
expression_level_coverage._calculate_percent_covered(0, 0), 100.0
)
self.assertEqual(
expression_level_coverage._calculate_percent_covered(3, 7), 57.14
)
def test_get_total_and_uncovered_expressions(self) -> None:
coverage = expression_level_coverage.CoverageAtPath(
path="test.py",
total_expressions=7,
coverage_gaps=[
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(line=11, column=16),
stop=expression_level_coverage.Pair(line=11, column=17),
),
function_name=None,
type_="typing.Any",
reason=["TypeIsAny message."],
)
],
)
self.assertEqual(
expression_level_coverage._get_total_and_uncovered_expressions(coverage),
(7, 1),
)
def test_get_percent_covered_per_path(self) -> None:
def assert_get_percent_covered_per_path(
response: expression_level_coverage.CoverageAtPathResponse, expected: float
) -> None:
self.assertEqual(
expression_level_coverage.get_percent_covered_per_path(response),
expected,
)
assert_get_percent_covered_per_path(
expression_level_coverage.CoverageAtPathResponse(
CoverageAtPath=expression_level_coverage.CoverageAtPath(
path="test.py",
total_expressions=0,
coverage_gaps=[],
)
),
100.0,
)
assert_get_percent_covered_per_path(
expression_level_coverage.CoverageAtPathResponse(
CoverageAtPath=expression_level_coverage.CoverageAtPath(
path="test.py",
total_expressions=5,
coverage_gaps=[
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(
line=11, column=16
),
stop=expression_level_coverage.Pair(line=11, column=17),
),
function_name=None,
type_="typing.Any",
reason=["TypeIsAny message."],
)
],
)
),
80.0,
)
def test_summary_expression_level_coverage(self) -> None:
def assert_summary_expression_level_coverage(
response: object, expected: str
) -> None:
self.assertEqual(
expression_level_coverage.summary_expression_level(response),
expected,
)
assert_summary_expression_level_coverage(
daemon_query.Response({"response": []}).payload,
"Overall: 100.0% expressions are covered",
)
assert_summary_expression_level_coverage(
daemon_query.Response(
{
"response": [
[
"CoverageAtPath",
{
"path": "test.py",
"total_expressions": 0,
"coverage_gaps": [],
},
],
]
}
).payload,
"test.py: 100.0% expressions are covered\n"
"Overall: 100.0% expressions are covered",
)
assert_summary_expression_level_coverage(
daemon_query.Response(
{
"response": [
[
"CoverageAtPath",
{
"path": "test.py",
"total_expressions": 7,
"coverage_gaps": [
{
"location": {
"start": {"line": 11, "column": 16},
"stop": {"line": 11, "column": 17},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
{
"location": {
"start": {"line": 12, "column": 11},
"stop": {"line": 12, "column": 12},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
],
},
],
]
}
).payload,
"test.py: 71.43% expressions are covered\n"
"Overall: 71.43% expressions are covered",
)
assert_summary_expression_level_coverage(
daemon_query.Response(
{
"response": [
[
"CoverageAtPath",
{
"path": "library.py",
"total_expressions": 4,
"coverage_gaps": [],
},
],
[
"CoverageAtPath",
{
"path": "test.py",
"total_expressions": 7,
"coverage_gaps": [
{
"location": {
"start": {"line": 11, "column": 16},
"stop": {"line": 11, "column": 17},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
{
"location": {
"start": {"line": 12, "column": 11},
"stop": {"line": 12, "column": 12},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
],
},
],
]
}
).payload,
"library.py: 100.0% expressions are covered\n"
"test.py: 71.43% expressions are covered\n"
"Overall: 81.82% expressions are covered",
)
assert_summary_expression_level_coverage(
daemon_query.Response(
{
"response": [
[
"ErrorAtPath",
{
"path": "fake.py",
"error": "Not able to get lookups in: `fake.py` (file not found)",
},
],
[
"CoverageAtPath",
{
"path": "test.py",
"total_expressions": 7,
"coverage_gaps": [
{
"location": {
"start": {"line": 11, "column": 16},
"stop": {"line": 11, "column": 17},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
{
"location": {
"start": {"line": 12, "column": 11},
"stop": {"line": 12, "column": 12},
},
"function_name": None,
"type_": "typing.Any",
"reason": ["TypeIsAny message."],
},
],
},
],
]
}
).payload,
"test.py: 71.43% expressions are covered\nOverall: 71.43% expressions are covered",
)
def test_CoveragePaths(self) -> None:
with tempfile.TemporaryDirectory() as root:
global_root = Path(root).resolve()
setup.ensure_directories_exists(
global_root,
[".pyre", "allows", "blocks", "search", "local/src/subpackage"],
)
setup.write_configuration_file(
global_root,
{
"only_check_paths": ["allows", "nonexistent"],
"ignore_all_errors": ["blocks", "nonexistent"],
"exclude": ["exclude"],
"extensions": [".ext", "invalid_extension"],
"workers": 42,
"search_path": ["search", "nonexistent"],
"strict": True,
},
)
setup.write_configuration_file(
global_root, {"source_directories": ["src"]}, relative="local"
)
local_root = global_root / "local"
Path(local_root / "src/a.py").touch()
Path(local_root / "src/b.py").touch()
Path(local_root / "src/subpackage/c.py").touch()
temp_configuration: configuration.Configuration = (
configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=global_root / ".pyre",
),
global_root,
)
)
with setup.switch_working_directory(local_root):
def assert_backend_paths(
raw_paths: Iterable[str],
expected: List[str],
) -> None:
self.assertEqual(
sorted(
expression_level_coverage.CoveragePaths.from_raw_path_arguments(
raw_paths=raw_paths,
configuration=frontend_configuration.OpenSource(
temp_configuration
),
).get_paths_for_backend()
),
sorted(expected),
)
assert_backend_paths(
raw_paths=[],
expected=[
str(local_root / "src/a.py"),
str(local_root / "src/b.py"),
str(local_root / "src/subpackage/c.py"),
],
)
assert_backend_paths(
raw_paths=["@arguments.txt", "@/absolute/arguments.txt"],
expected=[
"@" + str(local_root / "arguments.txt"),
"@/absolute/arguments.txt",
],
)
assert_backend_paths(
raw_paths=[
str(local_root / "src/a.py"), # absolute path
"src/b.py", # relative path
],
expected=[
str(local_root / "src/a.py"),
str(local_root / "src/b.py"),
],
)
assert_backend_paths(
raw_paths=[
"src/subpackage", # directory as a path
],
expected=[
str(local_root / "src/subpackage/c.py"),
],
)
def test_backend_exception(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(
root_path, [".pyre", "allows", "blocks", "search", "local/src"]
)
setup.write_configuration_file(
root_path,
{
"only_check_paths": ["allows", "nonexistent"],
"ignore_all_errors": ["blocks", "nonexistent"],
"exclude": ["exclude"],
"extensions": [".ext", "invalid_extension"],
"workers": 42,
"search_path": ["search", "nonexistent"],
"strict": True,
},
)
setup.write_configuration_file(
root_path, {"source_directories": ["src"]}, relative="local"
)
check_configuration = frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=root_path / ".pyre",
),
root_path,
)
)
self.mock_callable(daemon_query, "execute_query").to_raise(
connections.ConnectionFailure
)
self.assertEqual(
expression_level_coverage.run(
configuration=check_configuration,
paths=[],
print_summary=False,
),
commands.ExitCode.SERVER_NOT_FOUND,
)
def test_location_to_range(self) -> None:
def assert_location_to_range(
response: expression_level_coverage.Location, expected: lsp.LspRange
) -> None:
self.assertEqual(
expression_level_coverage.location_to_range(response),
expected,
)
assert_location_to_range(
expression_level_coverage.Location(
start=expression_level_coverage.Pair(line=1, column=1),
stop=expression_level_coverage.Pair(line=1, column=7),
),
lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=0, character=7),
),
)
def test_make_diagnostic_for_coverage_gap(self) -> None:
def assert_make_diagnostic_for_coverage_gap(
response: expression_level_coverage.CoverageGap, expected: lsp.Diagnostic
) -> None:
self.assertEqual(
expression_level_coverage.make_diagnostic_for_coverage_gap(response),
expected,
)
assert_make_diagnostic_for_coverage_gap(
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(line=1, column=1),
stop=expression_level_coverage.Pair(line=1, column=7),
),
function_name=None,
type_="typing.Any",
reason=["TypeIsAny message."],
),
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=0, character=7),
),
message="TypeIsAny message.",
),
)
def test_get_uncovered_expression_diagnostics(self) -> None:
def assert_get_uncovered_expression_diagnostics(
response: expression_level_coverage.ExpressionLevelCoverageResponse,
expected: List[lsp.Diagnostic],
) -> None:
self.assertEqual(
expression_level_coverage.get_uncovered_expression_diagnostics(
response
),
expected,
)
assert_get_uncovered_expression_diagnostics(
expression_level_coverage.ExpressionLevelCoverageResponse(
response=[
expression_level_coverage.ErrorAtPathResponse(
ErrorAtPath=expression_level_coverage.ErrorAtPath(
path="test.py",
error="error",
)
)
],
),
[],
)
assert_get_uncovered_expression_diagnostics(
expression_level_coverage.ExpressionLevelCoverageResponse(
response=[
expression_level_coverage.CoverageAtPathResponse(
CoverageAtPath=expression_level_coverage.CoverageAtPath(
path="test.py",
total_expressions=7,
coverage_gaps=[
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(
line=1, column=1
),
stop=expression_level_coverage.Pair(
line=1, column=7
),
),
function_name=None,
type_="typing.Any",
reason=["TypeIsAny message."],
)
],
)
)
],
),
[
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=0, character=7),
),
message="TypeIsAny message.",
)
],
)
assert_get_uncovered_expression_diagnostics(
expression_level_coverage.ExpressionLevelCoverageResponse(
response=[
expression_level_coverage.CoverageAtPathResponse(
CoverageAtPath=expression_level_coverage.CoverageAtPath(
path="test.py",
total_expressions=7,
coverage_gaps=[
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(
line=1, column=1
),
stop=expression_level_coverage.Pair(
line=1, column=7
),
),
function_name=None,
type_="typing.List[typing.Any]",
reason=["ContainerParameterIsAny message."],
),
expression_level_coverage.CoverageGap(
location=expression_level_coverage.Location(
start=expression_level_coverage.Pair(
line=2, column=4
),
stop=expression_level_coverage.Pair(
line=2, column=7
),
),
function_name=None,
type_="typing.Callable(foo.foo)[[Named(x, unknown)], None]",
reason=["CallableParameterIsUnknownOrAny message."],
),
],
)
)
],
),
[
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=0, character=1),
end=lsp.LspPosition(line=0, character=7),
),
message="ContainerParameterIsAny message.",
),
lsp.Diagnostic(
range=lsp.LspRange(
start=lsp.LspPosition(line=1, character=4),
end=lsp.LspPosition(line=1, character=7),
),
message="CallableParameterIsUnknownOrAny message.",
),
],
)
| ExpressionLevelTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-aws-datalake/destination_aws_datalake/aws.py | {
"start": 2979,
"end": 3880
} | class ____(object):
METHOD = "assume-role"
def __init__(self, fetcher):
self._fetcher = fetcher
def load(self):
return DeferredRefreshableCredentials(self._fetcher.fetch_credentials, self.METHOD)
@staticmethod
def assume_role_refreshable(
session: botocore.session.Session, role_arn: str, duration: int = 3600, session_name: str = None
) -> botocore.session.Session:
fetcher = AssumeRoleCredentialFetcher(
session.create_client,
session.get_credentials(),
role_arn,
extra_args={"DurationSeconds": duration, "RoleSessionName": session_name},
cache=JSONFileCache(),
)
role_session = botocore.session.Session()
role_session.register_component("credential_provider", CredentialResolver([AssumeRoleProvider(fetcher)]))
return role_session
| AssumeRoleProvider |
python | pytorch__pytorch | torch/distributed/checkpoint/planner.py | {
"start": 947,
"end": 1835
} | class ____:
"""Dataclass which holds information about what needs to be written to storage."""
index: MetadataIndex
type: WriteItemType
# Size of bytesIO data to be written.
bytes_io_data: Optional[BytesIOWriteData] = None
# Value present if it's a tensor write
tensor_data: Optional[TensorWriteData] = None
def tensor_storage_size(self) -> Optional[int]:
"""
Calculates the storage size of the underlying tensor, or None if this is not a tensor write.
Returns:
Optional[int] storage size, in bytes of underlying tensor if any.
"""
if self.tensor_data is None:
return None
numels = reduce(operator.mul, self.tensor_data.size, 1)
dtype_size = torch._utils._element_size(self.tensor_data.properties.dtype)
return numels * dtype_size
@dataclass(frozen=True)
| WriteItem |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/host_info_utils.py | {
"start": 946,
"end": 2683
} | class ____(Enum):
X86_64 = "x86_64"
X86 = "x86"
PPC = "ppc"
ARM = "arm"
def get_host_user_id() -> str:
from airflow_breeze.utils.run_utils import run_command
host_user_id = ""
os = get_host_os()
if os == "linux" or os == "darwin":
host_user_id = run_command(
cmd=["id", "-ur"], capture_output=True, text=True, quiet=True
).stdout.strip()
return host_user_id
def get_host_group_id() -> str:
from airflow_breeze.utils.run_utils import run_command
host_group_id = ""
os = get_host_os()
if os == "linux" or os == "darwin":
host_group_id = run_command(
cmd=["id", "-gr"], capture_output=True, text=True, quiet=True
).stdout.strip()
return host_group_id
def get_host_os() -> str:
return platform.system().lower()
_MACHINE_TO_ARCHITECTURE: dict[str, Architecture] = {
"amd64": Architecture.X86_64,
"x86_64": Architecture.X86_64,
"i686-64": Architecture.X86_64,
"i386": Architecture.X86,
"i686": Architecture.X86,
"x86": Architecture.X86,
"ia64": Architecture.X86, # Itanium is different x64 arch, treat it as the common x86.
"powerpc": Architecture.PPC,
"power macintosh": Architecture.PPC,
"ppc64": Architecture.PPC,
"armv6": Architecture.ARM,
"armv6l": Architecture.ARM,
"arm64": Architecture.ARM,
"armv7": Architecture.ARM,
"armv7l": Architecture.ARM,
"aarch64": Architecture.ARM,
}
def get_host_architecture() -> tuple[Architecture | None, str]:
"""Get architecture in the form of Tuple: standardized architecture, original platform"""
machine = platform.machine()
return _MACHINE_TO_ARCHITECTURE.get(machine.lower()), machine
| Architecture |
python | walkccc__LeetCode | solutions/874. Walking Robot Simulation/874.py | {
"start": 0,
"end": 668
} | class ____:
def robotSim(self, commands: list[int], obstacles: list[list[int]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
ans = 0
d = 0 # 0 := north, 1 := east, 2 := south, 3 := west
x = 0 # the start x
y = 0 # the start y
obstaclesSet = {(x, y) for x, y in obstacles}
for command in commands:
if command == -1:
d = (d + 1) % 4
elif command == -2:
d = (d + 3) % 4
else:
for _ in range(command):
if (x + DIRS[d][0], y + DIRS[d][1]) in obstaclesSet:
break
x += DIRS[d][0]
y += DIRS[d][1]
ans = max(ans, x * x + y * y)
return ans
| Solution |
python | huggingface__transformers | tests/models/seggpt/test_modeling_seggpt.py | {
"start": 5688,
"end": 13788
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SegGpt does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (SegGptModel, SegGptForImageSegmentation) if is_torch_available() else ()
test_resize_embeddings = False
test_torch_exportable = True
pipeline_model_mapping = (
{"feature-extraction": SegGptModel, "mask-generation": SegGptModel} if is_torch_available() else {}
)
def setUp(self):
self.model_tester = SegGptModelTester(self)
self.config_tester = ConfigTester(self, config_class=SegGptConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="SegGpt does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values", "prompt_pixel_values", "prompt_masks"]
self.assertListEqual(arg_names[:3], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
patch_height = patch_width = config.image_size // config.patch_size
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[patch_height, patch_width, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_batching_equivalence(self):
def recursive_check(batched_object, single_row_object, model_name, key):
if isinstance(batched_object, (list, tuple)):
for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
else:
batched_row = batched_object[:1]
self.assertFalse(
torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
)
self.assertFalse(
torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
)
self.assertTrue(
torch.max(torch.abs(batched_row - single_row_object)) <= 1e-03,
msg=(
f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
f"Difference={torch.max(torch.abs(batched_row - single_row_object))}."
),
)
config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config.output_hidden_states = True
model_name = model_class.__name__
batched_input_prepared = self._prepare_for_class(batched_input, model_class)
model = model_class(config).to(torch_device).eval()
batch_size = self.model_tester.batch_size
single_row_input = {}
for key, value in batched_input_prepared.items():
if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
single_batch_shape = value.shape[0] // batch_size
single_row_input[key] = value[:single_batch_shape]
with torch.no_grad():
model_batched_output = model(**batched_input_prepared)
model_row_output = model(**single_row_input)
for key in model_batched_output:
# the first hidden state in SegGPT has weird hack of adding first half of batch with second half
if key == "hidden_states":
model_batched_output[key] = model_batched_output[key][1:]
model_row_output[key] = model_row_output[key][1:]
recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
def test_seggpt_loss(self):
torch.manual_seed(100)
config = self.model_tester.get_config()
prompt_masks = torch.rand(1, config.num_channels, config.image_size, config.image_size)
label = torch.rand(1, config.num_channels, config.image_size, config.image_size)
pred_masks = torch.rand(1, config.num_channels, config.image_size * 2, config.image_size)
# seq_len x 2 because the loss concatenates prompt_masks and labels as pred_masks is concatenated
bool_masked_pos = torch.rand(1, self.model_tester.seq_length * 2) > 0.5
loss = SegGptLoss(config)
loss_value = loss(prompt_masks, pred_masks, label, bool_masked_pos)
expected_loss_value = torch.tensor(0.3340)
torch.testing.assert_close(loss_value, expected_loss_value, rtol=1e-4, atol=1e-4)
@slow
def test_model_from_pretrained(self):
model_name = "BAAI/seggpt-vit-large"
model = SegGptModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_img():
ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
images = [image.convert("RGB") for image in ds["image"]]
masks = [image.convert("RGB") for image in ds["mask"]]
return images, masks
def prepare_bool_masked_pos(config: SegGptConfig):
num_patches = math.prod([i // config.patch_size for i in config.image_size])
mask_ratio = 0.75
torch.manual_seed(2)
num_masked_patches = int(num_patches * mask_ratio)
shuffle_idx = torch.randperm(num_patches)
bool_masked_pos = torch.FloatTensor([0] * (num_patches - num_masked_patches) + [1] * num_masked_patches)[
shuffle_idx
]
bool_masked_pos = bool_masked_pos.unsqueeze(0).bool()
return bool_masked_pos
@require_torch
@require_vision
| SegGptModelTest |
python | numba__numba | numba/tests/support.py | {
"start": 43717,
"end": 46877
} | class ____(CompilerBase):
""" Same as the standard pipeline, but preserves the func_ir into the
metadata store after legalisation, useful for testing IR changes"""
def define_pipelines(self):
pipeline = DefaultPassBuilder.define_nopython_pipeline(
self.state, "ir_preserving_custom_pipe")
# mangle the default pipeline and inject DCE and IR preservation ahead
# of legalisation
# TODO: add a way to not do this! un-finalizing is not a good idea
pipeline._finalized = False
pipeline.add_pass_after(PreserveIR, IRLegalization)
pipeline.finalize()
return [pipeline]
def print_azure_matrix():
"""This is a utility function that prints out the map of NumPy to Python
versions and how many of that combination are being tested across all the
declared config for azure-pipelines. It is useful to run when updating the
azure-pipelines config to be able to quickly see what the coverage is."""
import yaml
from yaml import Loader
base_path = os.path.dirname(os.path.abspath(__file__))
azure_pipe = os.path.join(base_path, '..', '..', 'azure-pipelines.yml')
if not os.path.isfile(azure_pipe):
raise RuntimeError("'azure-pipelines.yml' is not available")
with open(os.path.abspath(azure_pipe), 'rt') as f:
data = f.read()
pipe_yml = yaml.load(data, Loader=Loader)
templates = pipe_yml['jobs']
# first look at the items in the first two templates, this is osx/linux
py2np_map = defaultdict(lambda: defaultdict(int))
for tmplt in templates[:2]:
matrix = tmplt['parameters']['matrix']
for setup in matrix.values():
py2np_map[setup['NUMPY']][setup['PYTHON']]+=1
# next look at the items in the windows only template
winpath = ['..', '..', 'buildscripts', 'azure', 'azure-windows.yml']
azure_windows = os.path.join(base_path, *winpath)
if not os.path.isfile(azure_windows):
raise RuntimeError("'azure-windows.yml' is not available")
with open(os.path.abspath(azure_windows), 'rt') as f:
data = f.read()
windows_yml = yaml.load(data, Loader=Loader)
# There's only one template in windows and its keyed differently to the
# above, get its matrix.
matrix = windows_yml['jobs'][0]['strategy']['matrix']
for setup in matrix.values():
py2np_map[setup['NUMPY']][setup['PYTHON']]+=1
print("NumPy | Python | Count")
print("-----------------------")
for npver, pys in sorted(py2np_map.items()):
for pyver, count in pys.items():
print(f" {npver} | {pyver:<4} | {count}")
# print the "reverse" map
rev_map = defaultdict(lambda: defaultdict(int))
for npver, pys in sorted(py2np_map.items()):
for pyver, count in pys.items():
rev_map[pyver][npver] = count
print("\nPython | NumPy | Count")
print("-----------------------")
sorter = lambda x: int(x[0].split('.')[1])
for pyver, nps in sorted(rev_map.items(), key=sorter):
for npver, count in nps.items():
print(f" {pyver:<4} | {npver} | {count}")
| IRPreservingTestPipeline |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 48823,
"end": 48894
} | class ____:
xlCreatorCode = 1480803660 # from enum XlCreator
| Creator |
python | apache__thrift | test/py/TestClient.py | {
"start": 14619,
"end": 15197
} | class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
wrapped_proto = make_pedantic(TBinaryProtocol.TBinaryProtocolAcceleratedFactory(fallback=False).getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "ThriftTest")
def get_protocol2(self, transport):
wrapped_proto = make_pedantic(TBinaryProtocol.TBinaryProtocolAcceleratedFactory(fallback=False).getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "SecondService")
| MultiplexedAcceleratedBinaryTest |
python | pytorch__pytorch | torchgen/operator_versions/gen_mobile_upgraders.py | {
"start": 418,
"end": 12381
} | class ____(Enum):
instructions = 1
constants = 2
types = 3
operators = 4
register_size = 5
EXCLUDED_OP_SET = [
"aten::full.names",
"aten::full.out",
"aten::full",
]
EXCLUE_UPGRADER_SET = ["full_0_4", "full_out_0_4"]
ONE_INSTRUCTION = CodeTemplate(
"""
Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
)
INSTRUCTION_LIST = CodeTemplate(
"""std::vector<Instruction>({
${instruction_list}
}), // instructions list"""
)
ONE_CONSTANT = CodeTemplate(
"""
c10::IValue(${constant}),"""
)
CONSTANT_LIST = CodeTemplate(
"""std::vector<c10::IValue>({
${constant_list}
}), // constants list"""
)
CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""
ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")
TYPE_LIST = CodeTemplate(
"""std::vector<c10::TypePtr>({
${type_list}
}), // types list"""
)
TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""
ONE_OPERATOTR_STRING = CodeTemplate(
"""
OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
)
OPERATOR_STRING_LIST = CodeTemplate(
"""
std::vector<OperatorString>({
${operator_string_list}
}), // operators list"""
)
ONE_UPGRADER_FUNCTION = CodeTemplate(
"""
mobile::Function::registerFunc(
"${upgrader_name}",
${instruction_list},
${constant_list},
${type_list},
${register_size}
)"""
)
ONE_UPGRADER_SRC = CodeTemplate(
"""
ByteCodeFunctionWithOperator({
${bytecode_function},
${operator_string_list}
}),"""
)
ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
"""Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
) # noqa: E501
ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
"""
{std::string("${operator_name}"),
std::vector<Upgrader>({
${upgrader_list_in_version_map}
})},"""
)
OPERATOR_VERSION_MAP = CodeTemplate(
"""
const std::unordered_map<std::string, std::vector<Upgrader>>
getOperatorVersionMapForMobile() {
static std::unordered_map<std::string, std::vector<Upgrader>>
operatorVersionMapForMobile({
${operator_list_in_version_map}
});
return operatorVersionMapForMobile;
}
"""
)
UPGRADER_CPP_SRC = CodeTemplate(
MOBILE_UPGRADERS_HEADER_DESCRIPTION
+ """
#include <caffe2/serialize/versions.h>
#include <torch/csrc/jit/mobile/type_parser.h>
#include <torch/csrc/jit/mobile/upgrader_mobile.h>
namespace torch {
namespace jit {
// clang-format off
// From operator_versions_map
${operator_version_map}
const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
auto generate_upgrader_bytecode_list = []() {
std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
${upgrader_bytecode}
});
for (const auto& upgrader_function : upgrader_function_list) {
for (const auto& op : upgrader_function.operators) {
upgrader_function.function.append_operator(
op.name,
op.overload_name,
op.num_specified_args);
}
}
return upgrader_function_list;
};
static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
generate_upgrader_bytecode_list();
return upgraderBytecodeList;
}
// clang-format on
} // namespace jit
} // namespace torch
"""
)
UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
UPGRADER_ELEMENT = CodeTemplate(
"""\
Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
"""
)
PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
"""\
{
std::string(${operator_name}),
std::vector<Upgrader>({${upgrader_list}});
}
"""
)
def construct_instruction(instruction_list_from_yaml: list[Any]) -> str:
instruction_list_part = [
ONE_INSTRUCTION.substitute(
operator_name=instruction[0],
X=instruction[1],
N=instruction[2],
)
for instruction in instruction_list_from_yaml
]
return INSTRUCTION_LIST.substitute(
instruction_list="".join(instruction_list_part).lstrip("\n")
)
def construct_constants(constants_list_from_yaml: list[Any]) -> str:
constants_list_part = []
for constant_from_yaml in constants_list_from_yaml:
convert_constant = None
if isinstance(constant_from_yaml, str):
# Add quotes if it's string
convert_constant = f'"{constant_from_yaml}"'
elif isinstance(constant_from_yaml, bool):
convert_constant = "true" if constant_from_yaml else "false"
elif constant_from_yaml is None:
convert_constant = ""
elif isinstance(constant_from_yaml, int):
convert_constant = str(constant_from_yaml)
else:
raise ValueError(
f"The type of {constant_from_yaml} is {type(constant_from_yaml)}. "
"Please add change in construct_constants function in gen_mobile_upgraders.py."
)
constants_list_part.append(ONE_CONSTANT.substitute(constant=convert_constant))
if len(constants_list_part) == 0:
return CONSTANTS_LIST_EMPTY
return CONSTANT_LIST.substitute(
constant_list="".join(constants_list_part).lstrip("\n")
)
def construct_operators(operator_list_from_yaml: list[Any]) -> str:
operator_list_part = [
ONE_OPERATOTR_STRING.substitute(
operator_name=operator[0],
overload_name=operator[1],
num_of_args=operator[2],
)
for operator in operator_list_from_yaml
]
return OPERATOR_STRING_LIST.substitute(
operator_string_list="".join(operator_list_part).lstrip("\n")
)
def construct_types(types_tr_list_from_yaml: list[Any]) -> str:
types_tr_list_part = [
ONE_TYPE.substitute(type_str=types_tr) for types_tr in types_tr_list_from_yaml
]
if len(types_tr_list_part) == 0:
return TYPE_LIST_EMPTY
return TYPE_LIST.substitute(type_list="".join(types_tr_list_part).lstrip("\n"))
def construct_register_size(register_size_from_yaml: int) -> str:
if not isinstance(register_size_from_yaml, int):
raise ValueError(
f"Input register size is {register_size_from_yaml} and"
"it's type is {type(register_size_from_yaml)}. An int type is expected."
)
return str(register_size_from_yaml)
def construct_version_maps(
upgrader_bytecode_function_to_index_map: dict[str, Any],
) -> str:
version_map = torch._C._get_operator_version_map()
sorted_version_map_ = sorted(version_map.items(), key=itemgetter(0)) # type: ignore[no-any-return]
sorted_version_map = dict(sorted_version_map_)
operator_list_in_version_map_part = []
for op_name in sorted_version_map:
upgraders_in_version_map_part = []
# TODO: remove the skip after these two operators schemas are fixed
if op_name in EXCLUDED_OP_SET:
continue
upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
upgrader_entries = sorted_version_map[op_name]
assert len(upgrader_ranges) == len(upgrader_entries)
for idx, upgrader_entry in enumerate(upgrader_entries):
upgrader_name = upgrader_entry.upgrader_name
bytecode_function_index = upgrader_bytecode_function_to_index_map[
upgrader_name
]
upgraders_in_version_map_part.append(
ONE_UPGRADER_IN_VERSION_MAP.substitute(
upgrader_min_version=upgrader_ranges[idx].min_version,
upgrader_max_version=upgrader_ranges[idx].max_version,
upgrader_name=upgrader_name,
bytecode_func_index=bytecode_function_index,
)
)
operator_list_in_version_map_part.append(
ONE_OPERATOR_IN_VERSION_MAP.substitute(
operator_name=op_name,
upgrader_list_in_version_map="".join(upgraders_in_version_map_part),
)
)
return OPERATOR_VERSION_MAP.substitute(
operator_list_in_version_map="".join(operator_list_in_version_map_part).lstrip(
"\n"
)
)
def get_upgrader_bytecode_function_to_index_map(
upgrader_dict: list[dict[str, Any]],
) -> dict[str, Any]:
upgrader_bytecode_function_to_index_map = {}
index = 0
for upgrader_bytecode in upgrader_dict:
for upgrader_name in upgrader_bytecode:
if upgrader_name in EXCLUE_UPGRADER_SET:
continue
upgrader_bytecode_function_to_index_map[upgrader_name] = index
index += 1
return upgrader_bytecode_function_to_index_map
def write_cpp(cpp_path: str, upgrader_dict: list[dict[str, Any]]) -> None:
upgrader_bytecode_function_to_index_map = (
get_upgrader_bytecode_function_to_index_map(upgrader_dict)
)
version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
all_upgrader_src_string = []
for upgrader_bytecode in upgrader_dict:
for upgrader_name, bytecode in upgrader_bytecode.items():
# TODO: remove the skip after these two operators schemas are fixed
if upgrader_name in EXCLUE_UPGRADER_SET:
continue
instruction_list_str = ""
constant_list_str = ""
type_list_str = ""
register_size_str = ""
operator_list_str = ""
for table_name, contents in bytecode.items():
element = ByteCode[table_name]
if element is ByteCode.instructions:
instruction_list_str = construct_instruction(contents)
elif element is ByteCode.constants:
constant_list_str = construct_constants(contents)
elif element is ByteCode.operators:
operator_list_str = construct_operators(contents)
elif element is ByteCode.types:
type_list_str = construct_types(contents)
elif element is ByteCode.register_size:
register_size_str = construct_register_size(contents)
one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
upgrader_name=upgrader_name,
instruction_list=instruction_list_str,
constant_list=constant_list_str,
type_list=type_list_str,
register_size=register_size_str,
)
one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
bytecode_function=one_upgrader_function_string.lstrip("\n"),
operator_string_list=operator_list_str.lstrip("\n"),
)
all_upgrader_src_string.append(one_upgrader_src_string)
upgrader_file_content = UPGRADER_CPP_SRC.substitute(
operator_version_map=version_map_src,
upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
)
print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME)
with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
out_file.write(upgrader_file_content.encode("utf-8"))
def sort_upgrader(upgrader_list: list[dict[str, Any]]) -> list[dict[str, Any]]:
sorted_upgrader_list = sorted(
upgrader_list, key=lambda one_upgrader: next(iter(one_upgrader))
)
return sorted_upgrader_list
def main() -> None:
upgrader_list = generate_upgraders_bytecode()
sorted_upgrader_list = sort_upgrader(upgrader_list)
for up in sorted_upgrader_list:
print("after sort upgrader : ", next(iter(up)))
pytorch_dir = Path(__file__).resolve().parents[2]
upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
write_cpp(str(upgrader_path), sorted_upgrader_list)
if __name__ == "__main__":
main()
| ByteCode |
python | plotly__plotly.py | plotly/graph_objs/layout/_grid.py | {
"start": 235,
"end": 18443
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.grid"
_valid_props = {
"columns",
"domain",
"pattern",
"roworder",
"rows",
"subplots",
"xaxes",
"xgap",
"xside",
"yaxes",
"ygap",
"yside",
}
@property
def columns(self):
"""
The number of columns in the grid. If you provide a 2D
`subplots` array, the length of its longest row is used as the
default. If you give an `xaxes` array, its length is used as
the default. But it's also possible to have a different length,
if you want to leave a row at the end for non-cartesian
subplots.
The 'columns' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["columns"]
@columns.setter
def columns(self, val):
self["columns"] = val
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.grid.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Returns
-------
plotly.graph_objs.layout.grid.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
@property
def pattern(self):
"""
If no `subplots`, `xaxes`, or `yaxes` are given but we do have
`rows` and `columns`, we can generate defaults using
consecutive axis IDs, in two ways: "coupled" gives one x axis
per column and one y axis per row. "independent" uses a new xy
pair for each cell, left-to-right across each row then
iterating rows according to `roworder`.
The 'pattern' property is an enumeration that may be specified as:
- One of the following enumeration values:
['independent', 'coupled']
Returns
-------
Any
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def roworder(self):
"""
Is the first row the top or the bottom? Note that columns are
always enumerated from left to right.
The 'roworder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top to bottom', 'bottom to top']
Returns
-------
Any
"""
return self["roworder"]
@roworder.setter
def roworder(self, val):
self["roworder"] = val
@property
def rows(self):
"""
The number of rows in the grid. If you provide a 2D `subplots`
array or a `yaxes` array, its length is used as the default.
But it's also possible to have a different length, if you want
to leave a row at the end for non-cartesian subplots.
The 'rows' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["rows"]
@rows.setter
def rows(self, val):
self["rows"] = val
@property
def subplots(self):
"""
Used for freeform grids, where some axes may be shared across
subplots but others are not. Each entry should be a cartesian
subplot id, like "xy" or "x3y2", or "" to leave that cell
empty. You may reuse x axes within the same column, and y axes
within the same row. Non-cartesian subplots and traces that
support `domain` can place themselves in this grid separately
using the `gridcell` attribute.
The 'subplots' property is an info array that may be specified as:
* a 2D list where:
The 'subplots[i][j]' property is an enumeration that may be specified as:
- One of the following enumeration values:
['']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?y([2-9]|[1-9][0-9]+)?$']
Returns
-------
list
"""
return self["subplots"]
@subplots.setter
def subplots(self, val):
self["subplots"] = val
@property
def xaxes(self):
"""
Used with `yaxes` when the x and y axes are shared across
columns and rows. Each entry should be an x axis id like "x",
"x2", etc., or "" to not put an x axis in that column. Entries
other than "" must be unique. Ignored if `subplots` is present.
If missing but `yaxes` is present, will generate consecutive
IDs.
The 'xaxes' property is an info array that may be specified as:
* a list of elements where:
The 'xaxes[i]' property is an enumeration that may be specified as:
- One of the following enumeration values:
['']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
list
"""
return self["xaxes"]
@xaxes.setter
def xaxes(self, val):
self["xaxes"] = val
@property
def xgap(self):
"""
Horizontal space between grid cells, expressed as a fraction of
the total width available to one cell. Defaults to 0.1 for
coupled-axes grids and 0.2 for independent grids.
The 'xgap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["xgap"]
@xgap.setter
def xgap(self, val):
self["xgap"] = val
@property
def xside(self):
"""
Sets where the x axis labels and titles go. "bottom" means the
very bottom of the grid. "bottom plot" is the lowest plot that
each x axis is used in. "top" and "top plot" are similar.
The 'xside' property is an enumeration that may be specified as:
- One of the following enumeration values:
['bottom', 'bottom plot', 'top plot', 'top']
Returns
-------
Any
"""
return self["xside"]
@xside.setter
def xside(self, val):
self["xside"] = val
@property
def yaxes(self):
"""
Used with `yaxes` when the x and y axes are shared across
columns and rows. Each entry should be an y axis id like "y",
"y2", etc., or "" to not put a y axis in that row. Entries
other than "" must be unique. Ignored if `subplots` is present.
If missing but `xaxes` is present, will generate consecutive
IDs.
The 'yaxes' property is an info array that may be specified as:
* a list of elements where:
The 'yaxes[i]' property is an enumeration that may be specified as:
- One of the following enumeration values:
['']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
list
"""
return self["yaxes"]
@yaxes.setter
def yaxes(self, val):
self["yaxes"] = val
@property
def ygap(self):
"""
Vertical space between grid cells, expressed as a fraction of
the total height available to one cell. Defaults to 0.1 for
coupled-axes grids and 0.3 for independent grids.
The 'ygap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["ygap"]
@ygap.setter
def ygap(self, val):
self["ygap"] = val
@property
def yside(self):
"""
Sets where the y axis labels and titles go. "left" means the
very left edge of the grid. *left plot* is the leftmost plot
that each y axis is used in. "right" and *right plot* are
similar.
The 'yside' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'left plot', 'right plot', 'right']
Returns
-------
Any
"""
return self["yside"]
@yside.setter
def yside(self, val):
self["yside"] = val
@property
def _prop_descriptions(self):
return """\
columns
The number of columns in the grid. If you provide a 2D
`subplots` array, the length of its longest row is used
as the default. If you give an `xaxes` array, its
length is used as the default. But it's also possible
to have a different length, if you want to leave a row
at the end for non-cartesian subplots.
domain
:class:`plotly.graph_objects.layout.grid.Domain`
instance or dict with compatible properties
pattern
If no `subplots`, `xaxes`, or `yaxes` are given but we
do have `rows` and `columns`, we can generate defaults
using consecutive axis IDs, in two ways: "coupled"
gives one x axis per column and one y axis per row.
"independent" uses a new xy pair for each cell, left-
to-right across each row then iterating rows according
to `roworder`.
roworder
Is the first row the top or the bottom? Note that
columns are always enumerated from left to right.
rows
The number of rows in the grid. If you provide a 2D
`subplots` array or a `yaxes` array, its length is used
as the default. But it's also possible to have a
different length, if you want to leave a row at the end
for non-cartesian subplots.
subplots
Used for freeform grids, where some axes may be shared
across subplots but others are not. Each entry should
be a cartesian subplot id, like "xy" or "x3y2", or ""
to leave that cell empty. You may reuse x axes within
the same column, and y axes within the same row. Non-
cartesian subplots and traces that support `domain` can
place themselves in this grid separately using the
`gridcell` attribute.
xaxes
Used with `yaxes` when the x and y axes are shared
across columns and rows. Each entry should be an x axis
id like "x", "x2", etc., or "" to not put an x axis in
that column. Entries other than "" must be unique.
Ignored if `subplots` is present. If missing but
`yaxes` is present, will generate consecutive IDs.
xgap
Horizontal space between grid cells, expressed as a
fraction of the total width available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.2 for
independent grids.
xside
Sets where the x axis labels and titles go. "bottom"
means the very bottom of the grid. "bottom plot" is the
lowest plot that each x axis is used in. "top" and "top
plot" are similar.
yaxes
Used with `yaxes` when the x and y axes are shared
across columns and rows. Each entry should be an y axis
id like "y", "y2", etc., or "" to not put a y axis in
that row. Entries other than "" must be unique. Ignored
if `subplots` is present. If missing but `xaxes` is
present, will generate consecutive IDs.
ygap
Vertical space between grid cells, expressed as a
fraction of the total height available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.3 for
independent grids.
yside
Sets where the y axis labels and titles go. "left"
means the very left edge of the grid. *left plot* is
the leftmost plot that each y axis is used in. "right"
and *right plot* are similar.
"""
def __init__(
self,
arg=None,
columns=None,
domain=None,
pattern=None,
roworder=None,
rows=None,
subplots=None,
xaxes=None,
xgap=None,
xside=None,
yaxes=None,
ygap=None,
yside=None,
**kwargs,
):
"""
Construct a new Grid object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Grid`
columns
The number of columns in the grid. If you provide a 2D
`subplots` array, the length of its longest row is used
as the default. If you give an `xaxes` array, its
length is used as the default. But it's also possible
to have a different length, if you want to leave a row
at the end for non-cartesian subplots.
domain
:class:`plotly.graph_objects.layout.grid.Domain`
instance or dict with compatible properties
pattern
If no `subplots`, `xaxes`, or `yaxes` are given but we
do have `rows` and `columns`, we can generate defaults
using consecutive axis IDs, in two ways: "coupled"
gives one x axis per column and one y axis per row.
"independent" uses a new xy pair for each cell, left-
to-right across each row then iterating rows according
to `roworder`.
roworder
Is the first row the top or the bottom? Note that
columns are always enumerated from left to right.
rows
The number of rows in the grid. If you provide a 2D
`subplots` array or a `yaxes` array, its length is used
as the default. But it's also possible to have a
different length, if you want to leave a row at the end
for non-cartesian subplots.
subplots
Used for freeform grids, where some axes may be shared
across subplots but others are not. Each entry should
be a cartesian subplot id, like "xy" or "x3y2", or ""
to leave that cell empty. You may reuse x axes within
the same column, and y axes within the same row. Non-
cartesian subplots and traces that support `domain` can
place themselves in this grid separately using the
`gridcell` attribute.
xaxes
Used with `yaxes` when the x and y axes are shared
across columns and rows. Each entry should be an x axis
id like "x", "x2", etc., or "" to not put an x axis in
that column. Entries other than "" must be unique.
Ignored if `subplots` is present. If missing but
`yaxes` is present, will generate consecutive IDs.
xgap
Horizontal space between grid cells, expressed as a
fraction of the total width available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.2 for
independent grids.
xside
Sets where the x axis labels and titles go. "bottom"
means the very bottom of the grid. "bottom plot" is the
lowest plot that each x axis is used in. "top" and "top
plot" are similar.
yaxes
Used with `yaxes` when the x and y axes are shared
across columns and rows. Each entry should be an y axis
id like "y", "y2", etc., or "" to not put a y axis in
that row. Entries other than "" must be unique. Ignored
if `subplots` is present. If missing but `xaxes` is
present, will generate consecutive IDs.
ygap
Vertical space between grid cells, expressed as a
fraction of the total height available to one cell.
Defaults to 0.1 for coupled-axes grids and 0.3 for
independent grids.
yside
Sets where the y axis labels and titles go. "left"
means the very left edge of the grid. *left plot* is
the leftmost plot that each y axis is used in. "right"
and *right plot* are similar.
Returns
-------
Grid
"""
super().__init__("grid")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Grid
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Grid`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("columns", arg, columns)
self._set_property("domain", arg, domain)
self._set_property("pattern", arg, pattern)
self._set_property("roworder", arg, roworder)
self._set_property("rows", arg, rows)
self._set_property("subplots", arg, subplots)
self._set_property("xaxes", arg, xaxes)
self._set_property("xgap", arg, xgap)
self._set_property("xside", arg, xside)
self._set_property("yaxes", arg, yaxes)
self._set_property("ygap", arg, ygap)
self._set_property("yside", arg, yside)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Grid |
python | tornadoweb__tornado | tornado/test/template_test.py | {
"start": 10075,
"end": 10689
} | class ____(unittest.TestCase):
def test_details(self):
loader = DictLoader({"foo.html": "\n\n{{"})
with self.assertRaises(ParseError) as cm:
loader.load("foo.html")
self.assertEqual("Missing end expression }} at foo.html:3", str(cm.exception))
self.assertEqual("foo.html", cm.exception.filename)
self.assertEqual(3, cm.exception.lineno)
def test_custom_parse_error(self):
# Make sure that ParseErrors remain compatible with their
# pre-4.3 signature.
self.assertEqual("asdf at None:0", str(ParseError("asdf")))
| ParseErrorDetailTest |
python | psf__black | src/black/trans.py | {
"start": 11019,
"end": 11609
} | class ____:
"""A custom (i.e. manual) string split.
A single CustomSplit instance represents a single substring.
Examples:
Consider the following string:
```
"Hi there friend."
" This is a custom"
f" string {split}."
```
This string will correspond to the following three CustomSplit instances:
```
CustomSplit(False, 16)
CustomSplit(False, 17)
CustomSplit(True, 16)
```
"""
has_prefix: bool
break_idx: int
CustomSplitMapKey = tuple[StringID, str]
@trait
| CustomSplit |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 213429,
"end": 225648
} | class ____(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
TEST_WITH_SLOW_GRADCHECK: bool = TestEnvironment.def_flag(
"TEST_WITH_SLOW_GRADCHECK",
env_var="PYTORCH_TEST_WITH_SLOW_GRADCHECK",
)
skipIfSlowGradcheckEnv = unittest.skipIf(
TEST_WITH_SLOW_GRADCHECK,
"Tests that don't use gradcheck don't need to run on slow_gradcheck CI",
)
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if TEST_WITH_SLOW_GRADCHECK:
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if TEST_WITH_SLOW_GRADCHECK:
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# FIXME: delete this
# Using @toleranceOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
# FIXME: move to test_sparse or sparse utils
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
def is_coalesced_indices(s):
indices = s._indices()
hash_coeffs = (1,) + s.shape[s.sparse_dim() - 1:0:-1]
hash_indices = torch.tensor(hash_coeffs, device=s.device).cumprod(-1).flip(-1)
if s.sparse_dim() > 1:
hash_indices.unsqueeze_(-1)
hash_indices = (indices * hash_indices).sum(0)
else:
hash_indices = indices * hash_indices
# check if indices are sorted
res = torch.allclose(hash_indices, hash_indices.sort()[0])
# check if there are no repeated indices
res = res and torch.allclose(hash_indices, hash_indices.unique())
return res
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
# return the shared library file in the installed folder if exist,
# else the file in the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parents[2]
return torch_root / 'build' / 'lib' / lib_name
def skip_but_pass_in_sandcastle(reason):
"""
Similar to unittest.skip, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: list[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.uint16: ctypes.c_uint16,
torch.uint32: ctypes.c_uint32,
torch.uint64: ctypes.c_uint64,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
def copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__ # type: ignore[attr-defined]
return g
def xfail_inherited_tests(tests):
"""
Given a list of test names which are defined by a superclass of the
class this decorates, mark them as expected failure. This is useful
if you are doing poor man's parameterized tests by subclassing a generic
test class.
"""
def deco(cls):
for t in tests:
# NB: expectedFailure operates by mutating the method in question,
# which is why you have to copy the function first
setattr(cls, t, unittest.expectedFailure(copy_func(getattr(cls, t))))
return cls
return deco
def skip_but_pass_in_sandcastle_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if condition:
if IS_SANDCASTLE:
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return wrapper
else:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
@functools.lru_cache
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
# This is to avoid system disturbance that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = [measure() for _ in range(num)]
vals = sorted(vals)
return mean(vals[2 : num - 2])
# OpInfo utils
T = TypeVar('T')
def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
"""
Returns the first sample from an iterable of samples, like those returned by OpInfo.
The test will be skipped if no samples are available.
"""
try:
return next(iter(samples))
except StopIteration as e:
raise unittest.SkipTest('Skipped! Need at least 1 sample input') from e
# this helper method is to recursively
# clone the tensor-type input of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
@contextmanager
def custom_op(opname, symbolic_fn, opset_version):
"""Context manager/decorator to test ONNX export with custom operator"""
try:
register_custom_op_symbolic(opname, symbolic_fn, opset_version)
yield
finally:
unregister_custom_op_symbolic(opname, opset_version)
def outs_and_grads(fn, graph_inps, inps):
outs = fn(*graph_inps)
for out in pytree.tree_leaves(outs):
if isinstance(out, torch.Tensor) and out.requires_grad:
out.sum().backward(retain_graph=True)
grads = [inp.grad for inp in pytree.tree_leaves(inps) if isinstance(inp, torch.Tensor)]
for inp in pytree.tree_leaves(inps):
if isinstance(inp, torch.Tensor):
inp.grad = None
return outs, grads
def compare_equal_outs_and_grads(test, m1, m2, inps):
r1, g1 = outs_and_grads(m1, inps, inps)
r2, g2 = outs_and_grads(m2, inps, inps)
test.assertEqual(r1, r2)
test.assertEqual(g1, g2)
| BytesIOContext |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 14651,
"end": 14947
} | class ____(DagsterUserCodeExecutionError):
"""Indicates that an unexpected error occurred while executing the body of a config mapping
function defined in a :py:class:`~dagster.JobDefinition` or `~dagster.GraphDefinition` during
config parsing.
"""
| DagsterConfigMappingFunctionError |
python | getsentry__sentry | src/sentry/integrations/models/external_issue.py | {
"start": 2217,
"end": 3523
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
# The foreign key here is an `int`, not `bigint`.
organization = FlexibleForeignKey("sentry.Organization", db_constraint=False)
integration_id = HybridCloudForeignKey("sentry.Integration", on_delete="CASCADE")
key = models.CharField(max_length=256) # example APP-123 in jira
date_added = models.DateTimeField(default=timezone.now)
title = models.TextField(null=True)
description = models.TextField(null=True)
metadata = LegacyTextJSONField(default=dict, null=True)
objects: ClassVar[ExternalIssueManager] = ExternalIssueManager()
class Meta:
app_label = "sentry"
db_table = "sentry_externalissue"
unique_together = (("organization", "integration_id", "key"),)
__repr__ = sane_repr("organization_id", "integration_id", "key")
def get_installation(self) -> Any:
from sentry.integrations.services.integration import integration_service
integration = integration_service.get_integration(
integration_id=self.integration_id, status=ObjectStatus.ACTIVE
)
assert integration, "Integration is required to get an installation"
return integration.get_installation(organization_id=self.organization_id)
| ExternalIssue |
python | getsentry__sentry | tests/sentry/integrations/cursor/test_webhook.py | {
"start": 293,
"end": 11171
} | class ____(APITestCase):
endpoint = "sentry-extensions-cursor-webhook"
def setUp(self):
super().setUp()
# Create a Cursor integration linked to this organization
self.integration = self.create_integration(
organization=self.organization,
provider="cursor",
name="Cursor Agent",
external_id="cursor",
metadata={
"api_key": "test_api_key_123",
"domain_name": "cursor.sh",
"webhook_secret": "secret123",
},
)
self.installation = self.integration.get_installation(organization_id=self.organization.id)
def _url(self) -> str:
return reverse(
"sentry-extensions-cursor-webhook",
kwargs={"organization_id": self.organization.id},
)
def _signed_headers(self, body: bytes, secret: str | None = None) -> dict[str, str]:
used_secret = secret or self.integration.metadata["webhook_secret"]
signature = hmac.new(used_secret.encode("utf-8"), body, hashlib.sha256).hexdigest()
return {"HTTP_X_WEBHOOK_SIGNATURE": f"sha256={signature}"}
def _post_with_headers(self, body: bytes, headers: dict[str, str]):
# mypy: The DRF APIClient stubs can misinterpret **extra headers as a positional arg.
client: Any = self.client
return client.post(self._url(), data=body, content_type="application/json", **headers)
def _build_status_payload(
self,
*,
id: str | None = "agent-1",
status: str = "FINISHED",
repo: str = "github.com/testorg/testrepo",
ref: str | None = "main",
pr_url: str | None = "https://github.com/testorg/testrepo/pull/1",
agent_url: str | None = "https://cursor.sh/agents/1",
summary: str | None = "All done",
) -> dict[str, Any]:
return {
"event": "statusChange",
"id": id,
"status": status,
"source": {"repository": repo, "ref": ref},
"target": {"prUrl": pr_url, "url": agent_url},
"summary": summary,
}
@patch("sentry.integrations.cursor.webhooks.handler.update_coding_agent_state")
def test_happy_path_finished(self, mock_update_state):
payload = self._build_status_payload(status="FINISHED")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 204
# Validate call to update_coding_agent_state
assert mock_update_state.call_count == 1
args, kwargs = mock_update_state.call_args
assert kwargs["agent_id"] == "agent-1"
assert kwargs["status"].name == "COMPLETED"
assert kwargs["agent_url"] == "https://cursor.sh/agents/1"
result = kwargs["result"]
assert result.repo_full_name == "testorg/testrepo"
assert result.repo_provider == "github"
assert result.pr_url == "https://github.com/testorg/testrepo/pull/1"
def test_feature_flag_disabled(self):
payload = self._build_status_payload(status="FINISHED")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": False}):
response = self._post_with_headers(body, headers)
assert response.status_code == 404
def test_invalid_method(self):
with pytest.raises(MethodNotAllowed):
self.client.get(self._url())
def test_invalid_json(self):
body = b"{bad json}"
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 400
def test_missing_signature(self):
payload = self._build_status_payload()
body = orjson.dumps(payload)
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self.client.post(self._url(), data=body, content_type="application/json")
assert response.status_code == 403
def test_invalid_signature(self):
payload = self._build_status_payload()
body = orjson.dumps(payload)
headers = {"HTTP_X_WEBHOOK_SIGNATURE": "sha256=deadbeef"}
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 403
@patch(
"sentry.integrations.cursor.webhooks.handler.CursorWebhookEndpoint._get_cursor_integration_secret",
return_value=None,
)
def test_no_webhook_secret_set(self, _mock_secret):
payload = self._build_status_payload()
body = orjson.dumps(payload)
# Provide any signature header so we hit secret lookup path
headers = {"HTTP_X_WEBHOOK_SIGNATURE": "sha256=deadbeef"}
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 403
@patch("sentry.integrations.cursor.webhooks.handler.update_coding_agent_state")
def test_error_status_maps_to_failed(self, mock_update_state):
payload = self._build_status_payload(status="ERROR", pr_url=None)
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 204
args, kwargs = mock_update_state.call_args
assert kwargs["status"].name == "FAILED"
# pr_url should be None for failures
assert kwargs["result"].pr_url is None
@patch("sentry.integrations.cursor.webhooks.handler.update_coding_agent_state")
def test_unknown_status_logs_and_defaults_to_failed(self, mock_update_state):
payload = self._build_status_payload(status="WEIRD")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 204
args, kwargs = mock_update_state.call_args
assert kwargs["status"].name == "FAILED"
def test_missing_agent_id_or_status(self):
# Missing id
body = orjson.dumps(self._build_status_payload(id=None))
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
# Missing status
payload = self._build_status_payload()
payload.pop("status")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
@patch("sentry.integrations.cursor.webhooks.handler.update_coding_agent_state")
def test_repo_variants_and_validation(self, mock_update_state):
# Missing repo
payload = self._build_status_payload()
payload["source"].pop("repository")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
mock_update_state.assert_not_called()
# Non-github host
payload = self._build_status_payload(repo="https://gitlab.com/testorg/testrepo")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
mock_update_state.assert_not_called()
# Bad format path
payload = self._build_status_payload(repo="github.com/not-a-valid-path")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
mock_update_state.assert_not_called()
# No scheme but valid host should work
payload = self._build_status_payload(repo="github.com/testorg/testrepo")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
assert mock_update_state.call_count == 1
# Dotted repo name should be accepted
mock_update_state.reset_mock()
payload = self._build_status_payload(repo="github.com/testorg/test.repo")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
resp = self._post_with_headers(body, headers)
assert resp.status_code == 204
assert mock_update_state.call_count == 1
@patch("sentry.integrations.cursor.webhooks.handler.update_coding_agent_state")
def test_signature_without_prefix(self, mock_update_state):
payload = self._build_status_payload(status="FINISHED")
body = orjson.dumps(payload)
secret = self.integration.metadata["webhook_secret"]
signature = hmac.new(secret.encode("utf-8"), body, hashlib.sha256).hexdigest()
headers = {"HTTP_X_WEBHOOK_SIGNATURE": signature}
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 204
@patch("sentry.integrations.cursor.webhooks.handler.update_coding_agent_state")
def test_seer_api_error_is_caught(self, mock_update_state):
from sentry.seer.models import SeerApiError
mock_update_state.side_effect = SeerApiError("boom", status=500)
payload = self._build_status_payload(status="FINISHED")
body = orjson.dumps(payload)
headers = self._signed_headers(body)
with Feature({"organizations:seer-coding-agent-integrations": True}):
response = self._post_with_headers(body, headers)
assert response.status_code == 204
# Even with exception, endpoint must not raise
| TestCursorWebhook |
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_pclass.py | {
"start": 8057,
"end": 9767
} | class ____(object):
__slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')
def __init__(self, original, initial_dict):
self._pclass_evolver_original = original
self._pclass_evolver_data = initial_dict
self._pclass_evolver_data_is_dirty = False
self._factory_fields = set()
def __getitem__(self, item):
return self._pclass_evolver_data[item]
def set(self, key, value):
if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
self._pclass_evolver_data[key] = value
self._factory_fields.add(key)
self._pclass_evolver_data_is_dirty = True
return self
def __setitem__(self, key, value):
self.set(key, value)
def remove(self, item):
if item in self._pclass_evolver_data:
del self._pclass_evolver_data[item]
self._factory_fields.discard(item)
self._pclass_evolver_data_is_dirty = True
return self
raise AttributeError(item)
def __delitem__(self, item):
self.remove(item)
def persistent(self):
if self._pclass_evolver_data_is_dirty:
return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
**self._pclass_evolver_data)
return self._pclass_evolver_original
def __setattr__(self, key, value):
if key not in self.__slots__:
self.set(key, value)
else:
super(_PClassEvolver, self).__setattr__(key, value)
def __getattr__(self, item):
return self[item]
| _PClassEvolver |
python | GoogleCloudPlatform__python-docs-samples | logging/redaction/log_redaction.py | {
"start": 1752,
"end": 4841
} | class ____(DoFn):
"""Ingest payloads into destination log"""
def __init__(self, destination_log_name):
self.destination_log_name = destination_log_name
self.logger = None
def _replace_log_name(self, entry):
# update log name in the entry with destination log
entry["logName"] = self.logger.name
return entry
def setup(self):
# initialize logging client
if self.logger:
return
logging_client = logging_v2.Client()
if not logging_client:
logging.error("Cannot create GCP Logging Client")
raise PipelineError("Cannot create GCP Logging Client")
self.logger = logging_client.logger(self.destination_log_name)
if not self.logger:
logging.error("Google client library cannot create Logger object")
raise PipelineError("Google client library cannot create Logger object")
def process(self, element):
if self.logger:
logs = list(map(self._replace_log_name, element))
self.logger.client.logging_api.write_entries(logs)
yield logs
def run(
pubsub_subscription: str,
destination_log_name: str,
window_size: float,
pipeline_args: list[str] = None,
) -> None:
"""Runs Dataflow pipeline"""
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
# TODO: Read job's deployment region
pipeline = Pipeline(options=pipeline_options)
_ = (
pipeline
| "Read log entries from Pub/Sub"
>> io.ReadFromPubSub(subscription=pubsub_subscription)
| "Convert log entry payload to Json" >> ParDo(PayloadAsJson())
| "Aggregate payloads in fixed time intervals"
>> WindowInto(FixedWindows(window_size))
# Optimize Google API consumption and avoid possible throttling
# by calling APIs for batched data and not per each element
| "Batch aggregated payloads"
>> CombineGlobally(BatchPayloads()).without_defaults()
# TODO: Placeholder for redaction transformation
| "Ingest to output log" >> ParDo(IngestLogs(destination_log_name))
)
pipeline.run()
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
"--pubsub_subscription",
help="The Cloud Pub/Sub subscription to read from in the format "
'"projects/<PROJECT_ID>/subscription/<SUBSCRIPTION_ID>".',
)
parser.add_argument(
"--destination_log_name",
help="The log name to ingest log entries in the format "
'"projects/<PROJECT_ID>/logs/<LOG_ID>".',
)
parser.add_argument(
"--window_size",
type=float,
default=60.0,
help="Output file's window size in seconds.",
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args.pubsub_subscription,
known_args.destination_log_name,
known_args.window_size,
pipeline_args,
)
| IngestLogs |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_video_intelligence.py | {
"start": 1462,
"end": 4850
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.video_intelligence.CloudVideoIntelligenceHook")
def test_detect_video_labels_green_path(self, mock_hook):
mocked_operation = mock.Mock()
mocked_operation.result = mock.Mock(return_value=AnnotateVideoResponse(annotation_results=[]))
mock_hook.return_value.annotate_video.return_value = mocked_operation
CloudVideoIntelligenceDetectVideoLabelsOperator(
input_uri=INPUT_URI,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
).execute(context={"task_instance": mock.Mock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.annotate_video.assert_called_once_with(
input_uri=INPUT_URI,
features=[Feature.LABEL_DETECTION],
input_content=None,
video_context=None,
location=None,
retry=DEFAULT,
timeout=None,
)
@mock.patch("airflow.providers.google.cloud.operators.video_intelligence.CloudVideoIntelligenceHook")
def test_detect_video_explicit_content_green_path(self, mock_hook):
mocked_operation = mock.Mock()
mocked_operation.result = mock.Mock(return_value=AnnotateVideoResponse(annotation_results=[]))
mock_hook.return_value.annotate_video.return_value = mocked_operation
CloudVideoIntelligenceDetectVideoExplicitContentOperator(
input_uri=INPUT_URI,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
).execute(context={"task_instance": mock.Mock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.annotate_video.assert_called_once_with(
input_uri=INPUT_URI,
features=[Feature.EXPLICIT_CONTENT_DETECTION],
input_content=None,
video_context=None,
location=None,
retry=DEFAULT,
timeout=None,
)
@mock.patch("airflow.providers.google.cloud.operators.video_intelligence.CloudVideoIntelligenceHook")
def test_detect_video_shots_green_path(self, mock_hook):
mocked_operation = mock.Mock()
mocked_operation.result = mock.Mock(return_value=AnnotateVideoResponse(annotation_results=[]))
mock_hook.return_value.annotate_video.return_value = mocked_operation
CloudVideoIntelligenceDetectVideoShotsOperator(
input_uri=INPUT_URI,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
).execute(context={"task_instance": mock.Mock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.annotate_video.assert_called_once_with(
input_uri=INPUT_URI,
features=[Feature.SHOT_CHANGE_DETECTION],
input_content=None,
video_context=None,
location=None,
retry=DEFAULT,
timeout=None,
)
| TestCloudVideoIntelligenceOperators |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_schemas.py | {
"start": 2979,
"end": 3542
} | class ____:
"""A formal parameter of an operator."""
name: str
type_constraint: TypeConstraintParam
required: bool
variadic: bool
default: Any = _EMPTY_DEFAULT
# TODO: Add other properties too
def __str__(self) -> str:
type_str = self.type_constraint.name
if self.has_default():
return f"{self.name}: {type_str} = {self.default}"
return f"{self.name}: {type_str}"
def has_default(self) -> bool:
return self.default is not _EMPTY_DEFAULT
@dataclasses.dataclass(frozen=True)
| Parameter |
python | astropy__astropy | astropy/io/fits/column.py | {
"start": 8768,
"end": 10639
} | class ____(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ("P", "Q"):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == "P":
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ""
else:
repeat = str(self.repeat)
return f"{repeat}{self.format}{self.option}"
| _ColumnFormat |
python | getsentry__sentry | src/sentry/auth/access.py | {
"start": 12856,
"end": 13384
} | class ____:
access: RpcBackedAccess
def maybe_singular_rpc_access_org_context(
access: Access, org_ids: set[int]
) -> SingularRpcAccessOrgOptimization | None:
if (
isinstance(access, RpcBackedAccess)
and len(org_ids) == 1
and access.rpc_user_organization_context.organization.id in org_ids
):
return SingularRpcAccessOrgOptimization(access)
return None
maybe_singular_api_access_org_context = maybe_singular_rpc_access_org_context
@dataclass
| SingularRpcAccessOrgOptimization |
python | getsentry__sentry | tests/sentry/tasks/test_auth.py | {
"start": 2012,
"end": 3557
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="bar@example.com")
self.organization = self.create_organization(name="Test")
with assume_test_silo_mode(SiloMode.CONTROL):
self.provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
om = self.create_member(
user_id=self.user.id,
organization=self.organization,
flags=OrganizationMember.flags["sso:linked"],
)
assert om.flags["sso:linked"]
self.user2 = self.create_user(email="baz@example.com")
om2 = self.create_member(user_id=self.user2.id, organization=self.organization, flags=0)
assert not om2.flags["sso:linked"]
def test_email_missing_links(self) -> None:
with self.tasks():
email_missing_links(self.organization.id, self.user.id, self.provider.provider)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user2.email]
assert "to enable signing on with your Dummy account" in message.body
assert "SSO link request invoked by bar@example.com" in message.body
def test_email_missing_links_organization_deleted(self) -> None:
self.organization.delete()
with self.tasks():
email_missing_links(self.organization.id, self.user.id, self.provider.provider)
assert len(mail.outbox) == 0
| EmailMissingLinksTest |
python | huggingface__transformers | src/transformers/models/sew/modeling_sew.py | {
"start": 28863,
"end": 34585
} | class ____(SEWPreTrainedModel):
def __init__(self, config: SEWConfig):
super().__init__(config)
self.config = config
self.feature_extractor = SEWFeatureEncoder(config)
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.project_features = config.conv_dim[-1] != config.hidden_size
if self.project_features:
self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.feature_dropout = nn.Dropout(config.feat_proj_dropout)
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = SEWEncoder(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
extract_features = self.layer_norm(extract_features)
if self.project_features:
extract_features = self.feature_projection(extract_features)
hidden_states = self.feature_dropout(extract_features)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
_HIDDEN_STATES_START_POSITION = 1
@auto_docstring(
custom_intro="""
SEW Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
"""
)
| SEWModel |
python | numba__numba | numba/cuda/tests/cudadrv/test_select_device.py | {
"start": 511,
"end": 987
} | class ____(ContextResettingTestCase):
def test_select_device(self):
exception_queue = Queue()
for i in range(10):
t = threading.Thread(target=newthread, args=(exception_queue,))
t.start()
t.join()
exceptions = []
while not exception_queue.empty():
exceptions.append(exception_queue.get())
self.assertEqual(exceptions, [])
if __name__ == '__main__':
unittest.main()
| TestSelectDevice |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 542507,
"end": 542985
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateRepositoryRuleset"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "ruleset")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
ruleset = sgqlc.types.Field("RepositoryRuleset", graphql_name="ruleset")
"""The newly created Ruleset."""
| CreateRepositoryRulesetPayload |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 14384,
"end": 15489
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
occurence_dict = helper_functions.get_value("ClassOccurences")
if len(y.shape) == 2:
stds = []
for i in range(y.shape[1]):
std = np.array(
[occurrence for occurrence in occurence_dict[i].values()],
dtype=np.float64,
)
std = (std / y.shape[0]).std()
stds.append(std)
return np.mean(stds)
else:
occurences = np.array(
[occurrence for occurrence in occurence_dict.values()], dtype=np.float64
)
return (occurences / y.shape[0]).std()
################################################################################
# Reif, A Comprehensive Dataset for Evaluating Approaches of various Meta-Learning Tasks
# defines these five metafeatures as simple metafeatures, but they could also
# be the counterpart for the skewness and kurtosis of the numerical features
@helper_functions.define("NumSymbols")
| ClassProbabilitySTD |
python | pypa__virtualenv | src/virtualenv/discovery/builtin.py | {
"start": 9581,
"end": 9724
} | class ____(PythonInfo):
"""python info from path."""
__all__ = [
"Builtin",
"PathPythonInfo",
"get_interpreter",
]
| PathPythonInfo |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/vertex_ai/test_generative_model.py | {
"start": 16970,
"end": 18174
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("generative_model.GenerativeModelHook"))
def test_execute(self, mock_hook):
cached_content_name = "test"
contents = ["what are in these papers"]
with pytest.warns(AirflowProviderDeprecationWarning):
op = GenerateFromCachedContentOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cached_content_name=cached_content_name,
contents=contents,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.generate_from_cached_content.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cached_content_name=cached_content_name,
contents=contents,
generation_config=None,
safety_settings=None,
)
| TestVertexAIGenerateFromCachedContentOperator |
python | mahmoud__boltons | boltons/socketutils.py | {
"start": 28346,
"end": 28808
} | class ____(NetstringProtocolError):
"""NetstringInvalidSize is raised when the ``:``-delimited size prefix
of the message does not contain a valid integer.
Message showing valid size::
5:hello,
Here the ``5`` is the size. Anything in this prefix position that
is not parsable as a Python integer (i.e., :class:`int`) will raise
this exception.
"""
def __init__(self, msg):
super().__init__(msg)
| NetstringInvalidSize |
python | allegroai__clearml | clearml/debugging/log.py | {
"start": 12253,
"end": 12358
} | class ____(logging.handlers.RotatingFileHandler, ClearmlLoggerHandler):
pass
| ClearmlRotatingFileHandler |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataflow.py | {
"start": 38385,
"end": 42402
} | class ____(GoogleCloudBaseOperator):
"""
Stops the job with the specified name prefix or Job ID.
All jobs with provided name prefix will be stopped.
Streaming jobs are drained by default.
Parameter ``job_name_prefix`` and ``job_id`` are mutually exclusive.
.. seealso::
For more details on stopping a pipeline see:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowStopJobOperator`
:param job_name_prefix: Name prefix specifying which jobs are to be stopped.
:param job_id: Job ID specifying which jobs are to be stopped.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Optional, Job location. If set to None or missing, "us-central1" will be used.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status to confirm it's stopped.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param drain_pipeline: Optional, set to False if want to stop streaming job by canceling it
instead of draining. See: https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:param stop_timeout: wait time in seconds for successful job canceling/draining
"""
template_fields = [
"job_id",
"project_id",
"impersonation_chain",
]
def __init__(
self,
job_name_prefix: str | None = None,
job_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
stop_timeout: int | None = 10 * 60,
drain_pipeline: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.poll_sleep = poll_sleep
self.stop_timeout = stop_timeout
self.job_name = job_name_prefix
self.job_id = job_id
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataflowHook | None = None
self.drain_pipeline = drain_pipeline
def execute(self, context: Context) -> None:
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.stop_timeout,
drain_pipeline=self.drain_pipeline,
)
if self.job_id or self.dataflow_hook.is_job_dataflow_running(
name=self.job_name,
project_id=self.project_id,
location=self.location,
):
self.dataflow_hook.cancel_job(
job_name=self.job_name,
project_id=self.project_id,
location=self.location,
job_id=self.job_id,
)
else:
self.log.info("No jobs to stop")
return None
| DataflowStopJobOperator |
python | astropy__astropy | astropy/modeling/rotations.py | {
"start": 12568,
"end": 15004
} | class ____(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be
in units of deg. Inputs are angles on the celestial sphere.
Outputs are angles on the native sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
# Inputs are angles on the celestial sphere
self.inputs = ("alpha_C", "delta_C")
# Outputs are angles on the native sphere
self.outputs = ("phi_N", "theta_N")
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles in the Celestial coordinate frame.
If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles on the Native sphere.
If float, in degrees.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = np.pi / 2 + lon
theta = np.pi / 2 - lat
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
| RotateCelestial2Native |
python | getsentry__sentry | src/sentry/models/options/option.py | {
"start": 395,
"end": 2005
} | class ____(OverwritableConfigMixin, Model):
"""
Global options which apply in most situations as defaults,
and generally can be overwritten by per-project options.
Options which are specific to a plugin should namespace
their key. e.g. key='myplugin:optname'
"""
# Subclasses should overwrite the relocation scope as appropriate.
__relocation_scope__ = RelocationScope.Excluded
__relocation_custom_ordinal__ = ["key"]
key = models.CharField(max_length=128, unique=True)
last_updated = models.DateTimeField(default=timezone.now)
last_updated_by = models.CharField(
max_length=16,
choices=UpdateChannel.choices(),
default=UpdateChannel.UNKNOWN.value,
db_default=UpdateChannel.UNKNOWN.value,
)
class Meta:
abstract = True
value = models.JSONField(null=True)
__repr__ = sane_repr("key", "value")
@classmethod
def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q:
# These ping options change too frequently, or necessarily with each install, to be useful
# in exports. More broadly, we don't really care about comparing them for accuracy.
return q & ~models.Q(
key__in={
"sentry:install-id", # Only used on self-hosted
"sentry:latest_version", # Auto-generated periodically, which defeats comparison
"sentry:last_worker_ping", # Changes very frequently
"sentry:last_worker_version", # Changes very frequently
}
)
@region_silo_model
| BaseOption |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 844924,
"end": 845652
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for ProjectV2."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2Edge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| ProjectV2Connection |
python | huggingface__transformers | src/transformers/tokenization_utils_tokenizers.py | {
"start": 2859,
"end": 54191
} | class ____(PreTrainedTokenizerBase):
"""
Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
Handles all the shared methods for tokenization and special tokens, as well as methods for
downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, *args, **kwargs):
tokenizer_object = kwargs.pop("tokenizer_object", None)
slow_tokenizer = kwargs.pop("__slow_tokenizer", None)
gguf_file = kwargs.pop("gguf_file", None)
fast_tokenizer_file = kwargs.pop("tokenizer_file", None)
from_slow = kwargs.pop("from_slow", False)
# Note: added_tokens_decoder is NOT popped - it's passed to super().__init__() for processing
added_tokens_decoder = kwargs.get("added_tokens_decoder", {})
# Store add_prefix_space before super().__init__() to ensure it's not overridden
add_prefix_space = kwargs.get("add_prefix_space", False)
if tokenizer_object is not None:
fast_tokenizer = copy.deepcopy(tokenizer_object)
elif fast_tokenizer_file is not None and not from_slow:
# We have a serialization from tokenizers which let us directly build the backend
fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
elif slow_tokenizer:
# We need to convert a slow tokenizer to build the backend
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
elif gguf_file is not None:
# We need to convert a slow tokenizer to build the backend
gguf_param = load_gguf_checkpoint(kwargs.get("vocab_file"))
architecture = gguf_param["config"]["model_type"]
tokenizer_dict = gguf_param["tokenizer"]
tokenizer_config = gguf_param["tokenizer_config"]
fast_tokenizer, additional_kwargs = convert_gguf_tokenizer(architecture, tokenizer_dict)
kwargs.update(tokenizer_config)
if len(additional_kwargs) > 0:
kwargs.update(additional_kwargs)
elif self.slow_tokenizer_class is not None and slow_tokenizer is not False:
# We need to create and convert a slow tokenizer to build the backend
slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs)
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
elif not slow_tokenizer:
# We tried loading a slow_tokenizer with spm and failed, try to load with tiktoken
self.vocab_file = kwargs.get("vocab_file")
# V5: Set _extra_special_tokens directly for converter
self._extra_special_tokens = kwargs.get("extra_special_tokens", [])
fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True)
slow_tokenizer = None
else:
raise ValueError(
"Couldn't instantiate the backend tokenizer from one of: \n"
"(1) a `tokenizers` library serialization file, \n"
"(2) a slow tokenizer instance to convert or \n"
"(3) an equivalent slow tokenizer class to instantiate and convert. \n"
"You need to have sentencepiece or tiktoken installed to convert a slow tokenizer to a fast one."
)
self._tokenizer = fast_tokenizer
if slow_tokenizer is not None:
kwargs.update(slow_tokenizer.init_kwargs)
_truncation = self._tokenizer.truncation
if _truncation is not None:
self._tokenizer.enable_truncation(**_truncation)
kwargs.setdefault("max_length", _truncation["max_length"])
kwargs.setdefault("truncation_side", _truncation["direction"])
kwargs.setdefault("stride", _truncation["stride"])
kwargs.setdefault("truncation_strategy", _truncation["strategy"])
else:
self._tokenizer.no_truncation()
_padding = self._tokenizer.padding
if _padding is not None:
self._tokenizer.enable_padding(**_padding)
kwargs.setdefault("pad_token", _padding["pad_token"])
kwargs.setdefault("pad_token_type_id", _padding["pad_type_id"])
kwargs.setdefault("padding_side", _padding["direction"])
kwargs.setdefault("max_length", _padding["length"])
kwargs.setdefault("pad_to_multiple_of", _padding["pad_to_multiple_of"])
# Set backend to "tokenizers" if not already set
if "backend" not in kwargs:
kwargs["backend"] = "tokenizers"
# We call this after having initialized the backend tokenizer because we update it.
super().__init__(**kwargs)
# Ensure add_prefix_space is set correctly after parent init
self.add_prefix_space = add_prefix_space
self._tokenizer.encode_special_tokens = self.split_special_tokens
added_tokens_decoder_hash = {hash(repr(token)) for token in self.added_tokens_decoder}
tokens_to_add = [
token
for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0])
if hash(repr(token)) not in added_tokens_decoder_hash
]
encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add]
# if some of the special tokens are not already in the tokenizer, add them
# V5: Check both named special tokens and extra special tokens
# Iterate over _special_tokens_map to preserve AddedToken properties (lstrip, rstrip, etc.)
for special_token_value in self._special_tokens_map.values():
if special_token_value is None:
continue
if str(special_token_value) not in encoder and special_token_value not in tokens_to_add:
tokens_to_add.append(special_token_value)
# Also check extra special tokens
for token in self._extra_special_tokens:
if str(token) not in encoder and token not in tokens_to_add:
tokens_to_add.append(token)
if len(tokens_to_add) > 0:
tokens = []
all_named_tokens = [str(t) for t in self._special_tokens_map.values() if t]
for token in tokens_to_add:
if isinstance(token, str):
# Convert string to AddedToken, assuming it's special
token = AddedToken(token, special=True)
elif isinstance(token, AddedToken):
# Ensure the special flag is set correctly for special tokens
if not token.special and str(token) in all_named_tokens:
token.special = True
tokens.append(token)
if tokens:
# These tokens are from the special tokens map
self.add_tokens(tokens, special_tokens=True)
try:
vocab_size = self._tokenizer.get_vocab_size()
except NotImplementedError:
vocab_size = 0
# Optionally patches mistral tokenizers with wrong regex
if vocab_size > 100000 and getattr(self._tokenizer, "pre_tokenizer", None) is not None:
self._tokenizer = self._patch_mistral_regex(
self._tokenizer,
self.init_kwargs.get("name_or_path", None),
init_kwargs=self.init_kwargs,
fix_mistral_regex=kwargs.get("fix_mistral_regex"),
**kwargs,
)
    @property
    def is_fast(self) -> bool:
        # Always True: this class is backed by the Rust `tokenizers` library,
        # i.e. the "fast" implementation.
        return True
@property
def can_save_slow_tokenizer(self) -> bool:
"""
`bool`: Whether or not the slow tokenizer can be saved. For a sentencepiece based slow tokenizer, this
can only be `True` if the original `"sentencepiece.model"` was not deleted.
"""
if "vocab_file" in self.vocab_files_names and self.vocab_files_names["vocab_file"].endswith(".model"):
if hasattr(self, "vocab_file") and self.vocab_file:
# If the vocab file is a sentencepiece model, we can save it
return os.path.isfile(self.vocab_file)
return False
else:
return True
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        """
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError("add_bos_token = True but bos_token = None")
        eos = self.eos_token
        eos_token_id = self.eos_token_id
        # If eos_token is None and add_eos_token is True, silently disable add_eos_token
        # This allows tokenizers to set add_eos_token even if eos_token is not configured
        if eos is None and self.add_eos_token:
            self._add_eos_token = False
            return
        # TemplateProcessing syntax: "$A:0" is the first sequence with type-id 0,
        # "$B:1" the second with type-id 1; named special tokens are spliced around them.
        single = f"{(bos + ':0 ') if self.add_bos_token else ''}$A:0{(' ' + eos + ':0') if self.add_eos_token else ''}"
        pair = f"{single}{(' ' + bos + ':1') if self.add_bos_token else ''} $B:1{(' ' + eos + ':1') if self.add_eos_token else ''}"
        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=single, pair=pair, special_tokens=special_tokens
        )
    @property
    def add_eos_token(self):
        # Defaults to False when the flag was never explicitly configured.
        return getattr(self, "_add_eos_token", False)
    @property
    def add_bos_token(self):
        # Defaults to False when the flag was never explicitly configured.
        return getattr(self, "_add_bos_token", False)
    @add_eos_token.setter
    def add_eos_token(self, value):
        # object.__setattr__ bypasses any overridden __setattr__ on the class;
        # the post processor is rebuilt so the new flag takes effect immediately.
        object.__setattr__(self, "_add_eos_token", value)
        self.update_post_processor()
    @add_bos_token.setter
    def add_bos_token(self, value):
        # object.__setattr__ bypasses any overridden __setattr__ on the class;
        # the post processor is rebuilt so the new flag takes effect immediately.
        object.__setattr__(self, "_add_bos_token", value)
        self.update_post_processor()
    def _post_init(self):
        """
        Post-initialization hook that runs after the tokenizer is fully set up.
        This is called by from_pretrained() after loading the tokenizer, which allows
        us to add any special tokens that may have been passed as AddedToken objects.
        Child classes should call super()._post_init() if they override this method.
        """
        tokens_to_add = []
        # V5: Check named special tokens
        for token_value in self._special_tokens_map.values():
            if token_value is None:
                continue
            # Only queue tokens the backend does not already know about.
            if isinstance(token_value, AddedToken):
                if self._tokenizer.token_to_id(str(token_value)) is None:
                    tokens_to_add.append(token_value)
            elif isinstance(token_value, str):
                if self._tokenizer.token_to_id(token_value) is None:
                    tokens_to_add.append(AddedToken(token_value, special=True, normalized=False))
        # V5: Check extra special tokens
        for token in self._extra_special_tokens:
            if isinstance(token, AddedToken):
                if self._tokenizer.token_to_id(str(token)) is None:
                    tokens_to_add.append(token)
            elif isinstance(token, str):
                if self._tokenizer.token_to_id(token) is None:
                    tokens_to_add.append(AddedToken(token, special=True, normalized=False))
        if tokens_to_add:
            # Ensure special tokens are added as such to the backend
            self.add_tokens(tokens_to_add, special_tokens=True)
        # Rebuild the template post processor only when bos/eos flags were configured.
        if hasattr(self, "_add_bos_token") or hasattr(self, "_add_eos_token"):
            self.update_post_processor()
        # Update add_prefix_space in the pre_tokenizer if needed.
        # The Rust pre_tokenizer objects are patched via a JSON round-trip.
        if hasattr(self, "add_prefix_space"):
            try:
                tokenizer_json = json.loads(self.backend_tokenizer.to_str())
                pre_tok = tokenizer_json.get("pre_tokenizer", {})
                # Recursively update add_prefix_space in pretokenizers
                def update_add_prefix_space(pretok_dict, value):
                    updated = False
                    if pretok_dict.get("type") == "Sequence":
                        for nested in pretok_dict.get("pretokenizers", []):
                            updated |= update_add_prefix_space(nested, value)
                    elif "add_prefix_space" in pretok_dict and pretok_dict["add_prefix_space"] != value:
                        pretok_dict["add_prefix_space"] = value
                        updated = True
                    return updated
                # Only rebuild the backend when something actually changed.
                if update_add_prefix_space(pre_tok, self.add_prefix_space):
                    self._tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json))
            except Exception:
                # Best-effort: leave the pre_tokenizer untouched if the round-trip fails.
                pass
        # Ensure normalizer flags (lowercase/accents/chinese chars) reflect tokenizer attributes
        try:
            normalizer = self.backend_tokenizer.normalizer
            if normalizer is not None:
                # The normalizer state is only reachable as a serialized JSON string.
                norm_state = json.loads(normalizer.__getstate__())
                norm_type = norm_state.get("type")
                desired_lowercase = getattr(self, "do_lower_case", None)
                desired_strip_accents = getattr(self, "strip_accents", None)
                # Some tokenizers expose keep_accents instead of strip_accents
                if desired_strip_accents is None and hasattr(self, "keep_accents") and "strip_accents" in norm_state:
                    keep_accents_value = getattr(self, "keep_accents")
                    if keep_accents_value is not None:
                        desired_strip_accents = not keep_accents_value
                desired_handle_chinese = getattr(self, "tokenize_chinese_chars", None)
                updated = False
                if (
                    desired_lowercase is not None
                    and "lowercase" in norm_state
                    and norm_state["lowercase"] != desired_lowercase
                ):
                    norm_state["lowercase"] = desired_lowercase
                    updated = True
                if (
                    desired_strip_accents is not None
                    and "strip_accents" in norm_state
                    and norm_state["strip_accents"] != desired_strip_accents
                ):
                    norm_state["strip_accents"] = desired_strip_accents
                    updated = True
                if (
                    desired_handle_chinese is not None
                    and "handle_chinese_chars" in norm_state
                    and norm_state["handle_chinese_chars"] != desired_handle_chinese
                ):
                    norm_state["handle_chinese_chars"] = desired_handle_chinese
                    updated = True
                if updated and norm_type is not None:
                    # Rebuild the normalizer from its patched serialized state.
                    norm_class = getattr(tokenizers_normalizers, norm_type, None)
                    if norm_class is not None:
                        norm_state.pop("type", None)
                        self.backend_tokenizer.normalizer = norm_class(**norm_state)
        except Exception:
            # Best-effort: do not block initialization on normalizer reconciliation
            pass
    @property
    def vocab_size(self) -> int:
        """
        `int`: Size of the base vocabulary (without the added tokens).
        """
        # `len(self)` gives the size including added tokens.
        return self._tokenizer.get_vocab_size(with_added_tokens=False)
def get_vocab(self) -> dict[str, int]:
return self._tokenizer.get_vocab(with_added_tokens=True)
    @property
    def vocab(self) -> dict[str, int]:
        # Convenience alias for get_vocab(): full token -> id mapping.
        return self.get_vocab()
@property
def added_tokens_encoder(self) -> dict[str, int]:
"""
Returns the sorted mapping from string to index. The added tokens encoder is cached for performance
optimisation in `self._added_tokens_encoder` for the slow tokenizers.
"""
return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}
    @property
    def added_tokens_decoder(self) -> dict[int, AddedToken]:
        """
        Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.

        Returns:
            `dict[int, AddedToken]`: The added tokens, keyed by id.
        """
        return self._tokenizer.get_added_tokens_decoder()
def get_added_vocab(self) -> dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`dict[str, int]`: The added tokens.
"""
return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}
    def __bool__(self) -> bool:
        """
        Returns True, to avoid expensive `assert tokenizer` gotchas.
        """
        # Without this, truthiness would fall back to __len__, triggering a full
        # vocab-size computation just to evaluate `if tokenizer:`.
        return True
    def __len__(self) -> int:
        """
        Size of the full vocabulary with the added tokens.
        """
        # Unlike `vocab_size`, this includes added tokens.
        return self._tokenizer.get_vocab_size(with_added_tokens=True)
    @property
    def backend_tokenizer(self) -> TokenizerFast:
        """
        `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend.
        """
        # Direct handle to the underlying Rust tokenizer instance.
        return self._tokenizer
    @property
    def decoder(self) -> DecoderFast:
        """
        `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer.
        """
        # May be None when the serialized tokenizer defines no decoder
        # (see the fallback in `convert_tokens_to_string`).
        return self._tokenizer.decoder
def _convert_encoding(
self,
encoding: EncodingFast,
return_token_type_ids: bool | None = None,
return_attention_mask: bool | None = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> tuple[dict[str, Any], list[EncodingFast]]:
"""
Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list
of encodings, take care of building a batch from overflowing tokens.
Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
lists (overflows) of lists (tokens).
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
for e in encodings:
encoding_dict["input_ids"].append(e.ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_length:
encoding_dict["length"].append(len(e.ids))
return encoding_dict, encodings
def _convert_token_to_id_with_added_voc(self, token: str) -> int:
index = self._tokenizer.token_to_id(token)
if index is None:
return self.unk_token_id
return index
    def _convert_id_to_token(self, index: int) -> str | None:
        # Returns None when the id is outside the (added-token-inclusive) vocabulary.
        return self._tokenizer.id_to_token(int(index))
def _add_tokens(self, new_tokens: list[str | AddedToken], special_tokens=False) -> int:
if special_tokens:
return self._tokenizer.add_special_tokens(new_tokens)
return self._tokenizer.add_tokens(new_tokens)
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
this inside your training loop.
</Tip>
Args:
pair (`bool`, *optional*, defaults to `False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
`int`: Number of special tokens added to sequences.
"""
return self._tokenizer.num_special_tokens_to_add(pair)
def convert_ids_to_tokens(self, ids: int | list[int], skip_special_tokens: bool = False) -> str | list[str]:
"""
Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `list[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `list[str]`: The decoded token(s).
"""
if isinstance(ids, int):
return self._tokenizer.id_to_token(ids)
tokens = []
# self.all_special_ids is an @property which may be slow, so only compute it once before the loop
ids_to_skip = set(self.all_special_ids) if skip_special_tokens else set()
for index in ids:
index = int(index)
if index in ids_to_skip:
continue
tokens.append(self._tokenizer.id_to_token(index))
return tokens
def tokenize(self, text: str, pair: str | None = None, add_special_tokens: bool = False, **kwargs) -> list[str]:
return self._encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens()
    def set_truncation_and_padding(
        self,
        padding_strategy: PaddingStrategy,
        truncation_strategy: TruncationStrategy,
        max_length: int,
        stride: int,
        pad_to_multiple_of: int | None,
        padding_side: str | None,
    ):
        """
        Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers
        library) and restore the tokenizer settings afterwards.

        The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a
        padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed
        section.

        Args:
            padding_strategy ([`~utils.PaddingStrategy`]):
                The kind of padding that will be applied to the input
            truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]):
                The kind of truncation that will be applied to the input
            max_length (`int`):
                The maximum size of a sequence.
            stride (`int`):
                The stride to use when handling overflow.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
        """
        _truncation = self._tokenizer.truncation
        _padding = self._tokenizer.padding
        # Set truncation and padding on the backend tokenizer
        if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE:
            if _truncation is not None:
                self._tokenizer.no_truncation()
        else:
            target = {
                "max_length": max_length,
                "stride": stride,
                "strategy": truncation_strategy.value,
                "direction": self.truncation_side,
            }
            # _truncation might contain more keys that the target `transformers`
            # supports. Use only the target keys to trigger `enable_truncation`.
            # This should enable this code to work on various `tokenizers`
            # targets.
            if _truncation is None:
                current = None
            else:
                current = {k: _truncation.get(k, None) for k in target}
            # Only reconfigure the backend when the desired state actually differs.
            if current != target:
                self._tokenizer.enable_truncation(**target)
        if padding_strategy == PaddingStrategy.DO_NOT_PAD:
            if _padding is not None:
                self._tokenizer.no_padding()
        else:
            # length=None means "pad to the longest entry in the batch".
            length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None
            target = {
                "length": length,
                "direction": padding_side if padding_side is not None else self.padding_side,
                "pad_id": self.pad_token_id,
                "pad_token": self.pad_token,
                "pad_type_id": self.pad_token_type_id,
                "pad_to_multiple_of": pad_to_multiple_of,
            }
            if _padding != target:
                self._tokenizer.enable_padding(**target)
    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: int | None = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: int | None = None,
        padding_side: str | None = None,
        return_tensors: bool | None = None,
        return_token_type_ids: bool | None = None,
        return_attention_mask: bool | None = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        split_special_tokens: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Encode one example or a batch (with optional pairs) by calling the Rust
        backend directly, then repackage the result as a `BatchEncoding`.

        Merges what used to be `_call_one` (validation, batch detection) and
        `_batch_encode_plus` (backend configuration and the actual encode).
        """
        # Input validation (from _call_one)
        def _is_valid_text_input(t):
            if isinstance(t, str):
                return True
            elif isinstance(t, (list, tuple)):
                if len(t) == 0:
                    return True
                elif isinstance(t[0], str):
                    return True
                elif isinstance(t[0], (list, tuple)):
                    if len(t[0]) == 0 or isinstance(t[0][0], str):
                        return True
                    elif isinstance(t[0][0], (list, tuple)):
                        return len(t[0][0]) == 0 or isinstance(t[0][0][0], str)
                    else:
                        return False
                else:
                    return False
            else:
                return False
        if not _is_valid_text_input(text):
            raise ValueError(
                "text input must be of type `str` (single example), `list[str]` (batch or single pretokenized example) "
                "or `list[list[str]]` (batch of pretokenized examples) or `list[tuple[list[str], list[str]]]` (batch of pretokenized sequence pairs)."
            )
        if text_pair is not None and not _is_valid_text_input(text_pair):
            raise ValueError(
                "text input must be of type `str` (single example), `list[str]` (batch or single pretokenized example) "
                "or `list[list[str]]` (batch of pretokenized examples) or `list[tuple[list[str], list[str]]]` (batch of pretokenized sequence pairs)."
            )
        # Batch detection (from _call_one)
        if is_split_into_words:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple))
        if is_batched:
            # Batch validation
            if isinstance(text_pair, str):
                raise TypeError(
                    "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as"
                    " `text`."
                )
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
        else:
            # Single input - convert to batch format
            batch_text_or_text_pairs = [(text, text_pair)] if text_pair else [text]
        # Set tokenizer configuration (from _batch_encode_plus)
        if not isinstance(batch_text_or_text_pairs, (tuple, list)):
            raise TypeError(
                f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})"
            )
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
        )
        # Use self.split_special_tokens as default if not explicitly provided
        if split_special_tokens is None:
            split_special_tokens = self.split_special_tokens
        if self._tokenizer.encode_special_tokens != split_special_tokens:
            self._tokenizer.encode_special_tokens = split_special_tokens
        # Direct rust backend call
        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=is_split_into_words,
        )
        # Convert encodings to BatchEncoding format
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]
        # Convert the output to have dict[list] from list[dict]
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0]:
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
        # If returning overflowing tokens, we need to return a mapping
        # from each overflow row back to the sample it came from
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
        batched_output = BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
        # If single input, remove the batch dimension (unless returning overflowing tokens)
        if not is_batched and return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: (value[0] if len(value) > 0 and isinstance(value[0], list) else value)
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )
        return batched_output
def convert_tokens_to_string(self, tokens: list[str]) -> str:
return (
self.backend_tokenizer.decoder.decode(tokens)
if self.backend_tokenizer.decoder is not None
else " ".join(tokens)
)
def _decode(
self,
token_ids: int | list[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool | None = None,
**kwargs,
) -> str:
# Removed: use_source_tokenizer parameter (unused)
kwargs.pop("use_source_tokenizer", None) # Pop if present to avoid errors
if isinstance(token_ids, int):
token_ids = [token_ids]
return self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
def _save_pretrained(
self,
save_directory: str | os.PathLike,
file_names: tuple[str, ...],
legacy_format: bool | None = None,
filename_prefix: str | None = None,
) -> tuple[str, ...]:
save_directory = str(save_directory)
tokenizer_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE
)
self.backend_tokenizer.save(tokenizer_file)
file_names = file_names + (tokenizer_file,)
return file_names
    def train_new_from_iterator(
        self,
        text_iterator,
        vocab_size,
        length=None,
        new_special_tokens=None,
        special_tokens_map=None,
        **kwargs,
    ):
        """
        Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline)
        as the current one.

        Args:
            text_iterator (generator of `list[str]`):
                The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts
                if you have everything in memory.
            vocab_size (`int`):
                The size of the vocabulary you want for your tokenizer.
            length (`int`, *optional*):
                The total number of sequences in the iterator. This is used to provide meaningful progress tracking
            new_special_tokens (list of `str` or `AddedToken`, *optional*):
                A list of new special tokens to add to the tokenizer you are training.
            special_tokens_map (`dict[str, str]`, *optional*):
                If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special
                token name to new special token name in this argument.
            kwargs (`dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library.

        Returns:
            [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on
            `text_iterator`.
        """
        # Strip the current vocab out of the serialized tokenizer so the pipeline
        # (normalizer, pre-tokenizer, decoder) is reused with a fresh model.
        tokenizer_json = json.loads(self._tokenizer.to_str())
        # Remove added tokens for now (uses IDs of tokens)
        added_tokens = tokenizer_json.pop("added_tokens")
        # Remove post processor for now (uses IDs of tokens)
        post_processor = tokenizer_json.pop("post_processor")
        unk_token = None
        # Remove vocab
        if tokenizer_json["model"]["type"] == "BPE":
            tokenizer_json["model"]["vocab"] = {}
            tokenizer_json["model"]["merges"] = []
        elif tokenizer_json["model"]["type"] == "Unigram":
            if tokenizer_json["model"]["unk_id"] is not None:
                unk_id = tokenizer_json["model"]["unk_id"]
                unk_token = tokenizer_json["model"]["vocab"][unk_id][0]
                if special_tokens_map is not None and unk_token in special_tokens_map:
                    unk_token = special_tokens_map[unk_token]
                tokenizer_json["model"]["unk_id"] = 0
                tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]]
        elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]:
            tokenizer_json["model"]["vocab"] = {}
        else:
            raise ValueError(
                f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) "
                "only BPE, Unigram, WordLevel and WordPiece."
            )
        if (
            special_tokens_map is not None
            and "unk_token" in tokenizer_json["model"]
            and tokenizer_json["model"]["unk_token"] in special_tokens_map
        ):
            tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]]
        tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json))
        # Get the special tokens from the current tokenizer if none are specified.
        special_tokens = []
        for added_token in added_tokens:
            special = added_token.pop("special", None)
            _ = added_token.pop("id", None)
            # For non-Unigram models only special added tokens are carried over.
            if tokenizer_json["model"]["type"] != "Unigram" and not special:
                continue
            if special_tokens_map is not None and added_token["content"] in special_tokens_map:
                added_token["content"] = special_tokens_map[added_token["content"]]
            special_tokens.append(AddedToken(**added_token))
        if new_special_tokens is not None:
            special_tokens.extend(new_special_tokens)
        # Trainer needs to know the end of word / continuing subword thingies in BPE
        if (
            tokenizer_json["model"]["type"] == "BPE"
            and "continuing_subword_prefix" not in kwargs
            and tokenizer_json["model"]["continuing_subword_prefix"] is not None
        ):
            kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"]
        if (
            tokenizer_json["model"]["type"] == "BPE"
            and "end_of_word_suffix" not in kwargs
            and tokenizer_json["model"]["end_of_word_suffix"] is not None
        ):
            kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"]
        if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None:
            kwargs["unk_token"] = unk_token
        if tokenizer_json["pre_tokenizer"] is not None:
            # ByteLevel pre-tokenizers (top-level or inside a Sequence) need the full
            # byte alphabet seeded into the trainer.
            if (
                tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel"
                or tokenizer_json["pre_tokenizer"]["type"] == "Sequence"
                and "pretokenizers" in tokenizer_json["pre_tokenizer"]
                and any(
                    pretokenizer["type"] == "ByteLevel"
                    for pretokenizer in tokenizer_json["pre_tokenizer"]["pretokenizers"]
                )
            ):
                kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet()
        trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]]
        trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs)
        tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer)
        if post_processor is not None:
            trained_tokenizer_json = json.loads(tokenizer.to_str())
            # Almost done, we just have to adjust the token IDs in the post processor
            if "special_tokens" in post_processor:
                for key in post_processor["special_tokens"]:
                    tokens = post_processor["special_tokens"][key]["tokens"]
                    if special_tokens_map is not None:
                        tokens = [special_tokens_map.get(token, token) for token in tokens]
                    post_processor["special_tokens"][key]["tokens"] = tokens
                    for token in tokens:
                        token_id = tokenizer.token_to_id(token)
                        if token_id is None:
                            raise ValueError(
                                "Attempted to set a token in the post processor that does not exist in the mapping"
                            )
                    post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens]
            for special_token in ["cls", "sep"]:
                if special_token in post_processor:
                    token, _ = post_processor[special_token]
                    if special_tokens_map is not None and token in special_tokens_map:
                        token = special_tokens_map[token]
                    token_id = tokenizer.token_to_id(token)
                    if token_id is None:
                        raise ValueError(
                            "Attempted to set a token in the post processor that does not exist in the mapping"
                        )
                    post_processor[special_token] = [token, token_id]
            trained_tokenizer_json["post_processor"] = post_processor
            tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json))
        kwargs = self.init_kwargs.copy()
        # V5: Map pad/cls/mask token at the Transformers level (named tokens only)
        for token in PreTrainedTokenizerBase.SPECIAL_TOKENS_ATTRIBUTES:
            if getattr(self, token) is not None:
                special_token = getattr(self, token)
                if special_tokens_map is not None and special_token in special_tokens_map:
                    special_token = special_tokens_map[special_token]
                special_token_full = self._special_tokens_map.get(token, None)
                if isinstance(special_token_full, AddedToken):
                    # Create an added token with the same parameters except the content
                    kwargs[token] = AddedToken(
                        special_token,
                        single_word=special_token_full.single_word,
                        lstrip=special_token_full.lstrip,
                        rstrip=special_token_full.rstrip,
                        normalized=special_token_full.normalized,
                        special=True,
                    )
                else:
                    kwargs[token] = special_token
        # V5: Handle extra special tokens
        extra_special_tokens = self.extra_special_tokens.copy() if self.extra_special_tokens else []
        if new_special_tokens is not None:
            extra_special_tokens.extend(new_special_tokens)
        if len(extra_special_tokens) > 0:
            kwargs["extra_special_tokens"] = extra_special_tokens
        # Always try to pass tokenizer_object in kwargs first (standard TokenizersBackend usage)
        # If the class creates its own tokenizer and passes it explicitly to super().__init__(),
        # this will cause a TypeError, which we catch and handle by removing tokenizer_object
        # from kwargs and setting _tokenizer directly after initialization.
        kwargs["tokenizer_object"] = tokenizer
        try:
            return self.__class__(**kwargs)
        except TypeError as e:
            # Check if the error is due to multiple values for tokenizer_object
            if "multiple values for keyword argument 'tokenizer_object'" in str(e):
                # Class creates its own tokenizer and passes it explicitly (like LayoutLMv3Tokenizer)
                # Remove tokenizer_object from kwargs and set _tokenizer directly
                kwargs.pop("tokenizer_object", None)
                new_tokenizer = self.__class__(**kwargs)
                new_tokenizer._tokenizer = tokenizer
                return new_tokenizer
            else:
                # Some other TypeError, re-raise it
                raise
    @classmethod
    def _patch_mistral_regex(
        cls,
        tokenizer,
        pretrained_model_name_or_path,
        token=None,
        cache_dir=None,
        local_files_only=False,
        _commit_hash=None,
        is_local=False,
        init_kwargs=None,
        fix_mistral_regex=None,
        **kwargs,
    ):
        """
        Patches mistral related tokenizers with incorrect regex if detected

        1) Local file with an associated config saved next to it
           >> Model type one of the mistral models (on older versions)
        2) Remote models on the hub from official mistral models
           >> Tags including `base_model:.*mistralai`
        """
        import re

        from huggingface_hub import model_info
        from packaging import version

        from transformers.utils.hub import cached_file

        # Heuristic: hub models derived from official mistralai checkpoints carry
        # a `base_model:...mistralai...` tag. NOTE: this performs a network call.
        def is_base_mistral(model_id: str) -> bool:
            model = model_info(model_id)
            if model.tags is not None:
                if re.search("base_model:.*mistralai", "".join(model.tags)):
                    return True
            return False
        if pretrained_model_name_or_path is not None and (is_local or is_base_mistral(pretrained_model_name_or_path)):
            _config_file = cached_file(
                pretrained_model_name_or_path,
                "config.json",
                cache_dir=cache_dir,
                token=token,
                local_files_only=local_files_only,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
                _commit_hash=_commit_hash,
            )
            # Detected using a (local) mistral tokenizer
            mistral_config_detected = False
            if _config_file is not None:
                with open(_config_file, encoding="utf-8") as f:
                    _config = json.load(f)
                transformers_version = _config.get("transformers_version")
                transformers_model_type = _config.get("model_type")
                # Detect if we can skip the mistral fix by
                # a) having a non-mistral tokenizer
                # b) fixed version of transformers
                if transformers_version and version.parse(transformers_version) <= version.parse("4.57.2"):
                    if (
                        is_local
                        and transformers_model_type is not None
                        and transformers_model_type
                        not in [
                            "mistral",
                            "mistral3",
                            "voxtral",
                            "ministral",
                            "pixtral",
                        ]
                    ):
                        return tokenizer
                elif transformers_version and version.parse(transformers_version) >= version.parse("5.0.0"):
                    # Checkpoints saved with v5+ are assumed to already carry the fix.
                    return tokenizer
                mistral_config_detected = True
            if mistral_config_detected or (not is_local and is_base_mistral(pretrained_model_name_or_path)):
                # Expose the `fix_mistral_regex` flag on the tokenizer when provided, even if no correction is applied.
                if init_kwargs and "fix_mistral_regex" in init_kwargs:
                    setattr(tokenizer, "fix_mistral_regex", init_kwargs["fix_mistral_regex"])
                # only warn if its not explicitly passed
                if fix_mistral_regex is None and not getattr(tokenizer, "fix_mistral_regex", False):
                    setattr(tokenizer, "fix_mistral_regex", False)
                    logger.warning(
                        f"The tokenizer you are loading from '{pretrained_model_name_or_path}'"
                        f" with an incorrect regex pattern: https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503/discussions/84#69121093e8b480e709447d5e."
                        " This will lead to incorrect tokenization. You should set the `fix_mistral_regex=True` flag when loading this tokenizer to fix this issue."
                    )
                elif fix_mistral_regex is True or getattr(tokenizer, "fix_mistral_regex", False):
                    setattr(tokenizer, "fix_mistral_regex", True)
                    import tokenizers

                    # Corrected split pattern (see the hub discussion linked above).
                    split_pretokenizer = tokenizers.pre_tokenizers.Split(
                        pattern=tokenizers.Regex(
                            r"[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+"
                        ),
                        behavior="isolated",
                    )
                    current_pretokenizer = tokenizer.backend_tokenizer.pre_tokenizer
                    # Check if it's already a Sequence
                    if isinstance(current_pretokenizer, tokenizers.pre_tokenizers.Sequence):
                        # Replace the first element (the Split pattern)
                        tokenizer.backend_tokenizer.pre_tokenizer[0] = split_pretokenizer
                    else:
                        # Replace Metaspace with ByteLevel when adding Split, as Metaspace(split=False) doesn't
                        # work correctly with the Split pre-tokenizer and causes spaces to be lost during encoding
                        if isinstance(current_pretokenizer, tokenizers.pre_tokenizers.Metaspace):
                            current_pretokenizer = tokenizers.pre_tokenizers.ByteLevel(
                                add_prefix_space=False, use_regex=False
                            )
                        # Not a Sequence, so create one with Split + current pretokenizer
                        tokenizer.backend_tokenizer.pre_tokenizer = tokenizers.pre_tokenizers.Sequence(
                            [
                                split_pretokenizer,
                                current_pretokenizer,
                            ]
                        )
        return tokenizer
# Backward-compatible alias: allow referring to TokenizersBackend as PreTrainedTokenizerFast
PreTrainedTokenizerFast = TokenizersBackend
| TokenizersBackend |
python | getsentry__sentry | tests/sentry/integrations/slack/utils/test_channel.py | {
"start": 9507,
"end": 10673
} | class ____(TestCase):
def test_behavior_for_known_slack_identifiers(self) -> None:
# User IDs
assert is_input_a_user_id("U12345678")
assert is_input_a_user_id("W12345678")
# Non-user IDs
assert not is_input_a_user_id("C12345678") # Channel ID
assert not is_input_a_user_id("T12345678") # Team ID
assert not is_input_a_user_id("A12345678") # App ID
assert not is_input_a_user_id("F12345678") # File ID
def create_user_response(*, user):
return SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={},
data={"ok": True, "user": user},
headers={},
status_code=200,
)
def create_user_error(*, error, status_code: int = 400):
return SlackApiError(
message=error,
response=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={},
data={"ok": False, "error": error},
headers={},
status_code=status_code,
),
)
| IsInputAUserIdTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.