language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/number-of-islands.py | {
"start": 2546,
"end": 3519
} | class ____(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def bfs(grid, i, j):
if grid[i][j] == '0':
return False
grid[i][j] ='0'
q = collections.deque([(i, j)])
while q:
r, c = q.popleft()
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(grid) and
0 <= nc < len(grid[0]) and
grid[nr][nc] == '1'):
continue
grid[nr][nc] = '0'
q.append((nr, nc))
return True
count = 0
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if bfs(grid, i, j):
count += 1
return count
| Solution3 |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/auth_generated.py | {
"start": 233,
"end": 408
} | class ____(BaseModel):
"""
HTTPException Model used for error response.
"""
detail: Annotated[str | dict[str, Any], Field(title="Detail")]
| HTTPExceptionResponse |
python | pydantic__pydantic | pydantic/v1/utils.py | {
"start": 12961,
"end": 14720
} | class ____(Representation):
"""
Hack to make object's smell just enough like dicts for validate_model.
We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.
"""
__slots__ = ('_obj',)
def __init__(self, obj: Any):
self._obj = obj
def __getitem__(self, key: str) -> Any:
try:
return getattr(self._obj, key)
except AttributeError as e:
raise KeyError(key) from e
def get(self, key: Any, default: Any = None) -> Any:
return getattr(self._obj, key, default)
def extra_keys(self) -> Set[Any]:
"""
We don't want to get any other attributes of obj if the model didn't explicitly ask for them
"""
return set()
def keys(self) -> List[Any]:
"""
Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python
dictionaries.
"""
return list(self)
def values(self) -> List[Any]:
return [self[k] for k in self]
def items(self) -> Iterator[Tuple[str, Any]]:
for k in self:
yield k, self.get(k)
def __iter__(self) -> Iterator[str]:
for name in dir(self._obj):
if not name.startswith('_'):
yield name
def __len__(self) -> int:
return sum(1 for _ in self)
def __contains__(self, item: Any) -> bool:
return item in self.keys()
def __eq__(self, other: Any) -> bool:
return dict(self) == dict(other.items())
def __repr_args__(self) -> 'ReprArgs':
return [(None, dict(self))]
def __repr_name__(self) -> str:
return f'GetterDict[{display_as_type(self._obj)}]'
| GetterDict |
python | wandb__wandb | wandb/vendor/pygments/style.py | {
"start": 883,
"end": 4554
} | class ____(type):
def __new__(mcs, name, bases, dct):
obj = type.__new__(mcs, name, bases, dct)
for token in STANDARD_TYPES:
if token not in obj.styles:
obj.styles[token] = ''
def colorformat(text):
if text in ansicolors:
return text
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
return col
elif len(col) == 3:
return col[0]*2 + col[1]*2 + col[2]*2
elif text == '':
return ''
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
for ttype in obj.styles:
for token in ttype.split():
if token in _styles:
continue
ndef = _styles.get(token.parent, None)
styledefs = obj.styles.get(token, '').split()
if not ndef or token is None:
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
elif 'noinherit' in styledefs and token is not Token:
ndef = _styles[Token][:]
else:
ndef = ndef[:]
_styles[token] = ndef
for styledef in obj.styles.get(token, '').split():
if styledef == 'noinherit':
pass
elif styledef == 'bold':
ndef[1] = 1
elif styledef == 'nobold':
ndef[1] = 0
elif styledef == 'italic':
ndef[2] = 1
elif styledef == 'noitalic':
ndef[2] = 0
elif styledef == 'underline':
ndef[3] = 1
elif styledef == 'nounderline':
ndef[3] = 0
elif styledef[:3] == 'bg:':
ndef[4] = colorformat(styledef[3:])
elif styledef[:7] == 'border:':
ndef[5] = colorformat(styledef[7:])
elif styledef == 'roman':
ndef[6] = 1
elif styledef == 'sans':
ndef[7] = 1
elif styledef == 'mono':
ndef[8] = 1
else:
ndef[0] = colorformat(styledef)
return obj
def style_for_token(cls, token):
t = cls._styles[token]
ansicolor = bgansicolor = None
color = t[0]
if color.startswith('#ansi'):
ansicolor = color
color = _ansimap[color]
bgcolor = t[4]
if bgcolor.startswith('#ansi'):
bgansicolor = bgcolor
bgcolor = _ansimap[bgcolor]
return {
'color': color or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
'bgcolor': bgcolor or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
'ansicolor': ansicolor,
'bgansicolor': bgansicolor,
}
def list_styles(cls):
return list(cls)
def styles_token(cls, ttype):
return ttype in cls._styles
def __iter__(cls):
for token in cls._styles:
yield token, cls.style_for_token(token)
def __len__(cls):
return len(cls._styles)
@add_metaclass(StyleMeta)
| StyleMeta |
python | joke2k__faker | faker/providers/isbn/en_US/__init__.py | {
"start": 42,
"end": 1166
} | class ____(ISBNProvider):
rules = {
# EAN prefix
"978": {
# Registration group
"0": [
# Registrant rule (min, max, registrant length)
("0000000", "1999999", 2),
("2000000", "2279999", 3),
("2280000", "2289999", 4),
("2290000", "6479999", 3),
("6480000", "6489999", 7),
("6490000", "6999999", 3),
("7000000", "8499999", 4),
("8500000", "8999999", 5),
("9000000", "9499999", 6),
("9500000", "9999999", 7),
],
"1": [
("0000000", "0999999", 2),
("1000000", "3999999", 3),
("4000000", "5499999", 4),
("5500000", "7319999", 5),
("7320000", "7399999", 7),
("7400000", "8697999", 5),
("8698000", "9729999", 6),
("9730000", "9877999", 4),
("9878000", "9989999", 6),
("9990000", "9999999", 7),
],
},
}
| Provider |
python | django__django | django/utils/feedgenerator.py | {
"start": 11341,
"end": 13663
} | class ____(RssFeed):
# Spec: https://cyber.harvard.edu/rss/rss.html
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item["title"])
handler.addQuickElement("link", item["link"])
if item["description"] is not None:
handler.addQuickElement("description", item["description"])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(
"author", "%s (%s)" % (item["author_email"], item["author_name"])
)
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(
"dc:creator",
item["author_name"],
{"xmlns:dc": "http://purl.org/dc/elements/1.1/"},
)
if item["pubdate"] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item["pubdate"]))
if item["comments"] is not None:
handler.addQuickElement("comments", item["comments"])
if item["unique_id"] is not None:
guid_attrs = {}
if isinstance(item.get("unique_id_is_permalink"), bool):
guid_attrs["isPermaLink"] = str(item["unique_id_is_permalink"]).lower()
handler.addQuickElement("guid", item["unique_id"], guid_attrs)
if item["ttl"] is not None:
handler.addQuickElement("ttl", item["ttl"])
# Enclosure.
if item["enclosures"]:
enclosures = list(item["enclosures"])
if len(enclosures) > 1:
raise ValueError(
"RSS feed items may only have one enclosure, see "
"http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
)
enclosure = enclosures[0]
handler.addQuickElement(
"enclosure",
"",
{
"url": enclosure.url,
"length": enclosure.length,
"type": enclosure.mime_type,
},
)
# Categories.
for cat in item["categories"]:
handler.addQuickElement("category", cat)
| Rss201rev2Feed |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-events-that-can-be-attended-ii.py | {
"start": 688,
"end": 1233
} | class ____(object):
def maxValue(self, events, k):
"""
:type events: List[List[int]]
:type k: int
:rtype: int
"""
events.sort()
sorted_starts = [x[0] for x in events]
dp = [[0]*(k+1) for _ in xrange(len(events)+1)]
for i in reversed(xrange(len(events))):
next_i = bisect.bisect_right(sorted_starts, events[i][1])-1
for j in xrange(1, k+1):
dp[i][j] = max(dp[i+1][j], dp[next_i+1][j-1]+events[i][2])
return dp[0][-1]
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/type-of-triangle-ii.py | {
"start": 36,
"end": 400
} | class ____(object):
def triangleType(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
nums.sort()
a, b, c = nums
if a+b <= c:
return "none"
if a == b == c:
return "equilateral"
if a == b or b == c:
return "isosceles"
return "scalene"
| Solution |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/inspect_utils_test.py | {
"start": 1943,
"end": 16582
} | class ____(test.TestCase):
def test_islambda(self):
def test_fn():
pass
self.assertTrue(inspect_utils.islambda(lambda x: x))
self.assertFalse(inspect_utils.islambda(test_fn))
def test_islambda_renamed_lambda(self):
l = lambda x: 1
l.__name__ = 'f'
self.assertTrue(inspect_utils.islambda(l))
def test_isnamedtuple(self):
nt = collections.namedtuple('TestNamedTuple', ['a', 'b'])
class NotANamedTuple(tuple):
pass
self.assertTrue(inspect_utils.isnamedtuple(nt))
self.assertFalse(inspect_utils.isnamedtuple(NotANamedTuple))
def test_isnamedtuple_confounder(self):
"""This test highlights false positives when detecting named tuples."""
class NamedTupleLike(tuple):
_fields = ('a', 'b')
self.assertTrue(inspect_utils.isnamedtuple(NamedTupleLike))
def test_isnamedtuple_subclass(self):
"""This test highlights false positives when detecting named tuples."""
class NamedTupleSubclass(collections.namedtuple('Test', ['a', 'b'])):
pass
self.assertTrue(inspect_utils.isnamedtuple(NamedTupleSubclass))
def assertSourceIdentical(self, actual, expected):
self.assertEqual(
textwrap.dedent(actual).strip(),
textwrap.dedent(expected).strip()
)
def test_getimmediatesource_basic(self):
def test_decorator(f):
def f_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return f_wrapper
expected = """
def f_wrapper(*args, **kwargs):
return f(*args, **kwargs)
"""
@test_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_noop_decorator(self):
def test_decorator(f):
return f
expected = '''
@test_decorator
def test_fn(a):
"""Test docstring."""
return [a]
'''
@test_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_functools_wrapper(self):
def wrapper_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
expected = textwrap.dedent("""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@wrapper_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_functools_wrapper_different_module(self):
expected = textwrap.dedent("""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@decorators.wrapping_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_normal_decorator_different_module(self):
expected = textwrap.dedent("""
def standalone_wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@decorators.standalone_decorator
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getimmediatesource_normal_functional_decorator_different_module(
self):
expected = textwrap.dedent("""
def functional_wrapper(*args, **kwargs):
return f(*args, **kwargs)
""")
@decorators.functional_decorator()
def test_fn(a):
"""Test docstring."""
return [a]
self.assertSourceIdentical(
inspect_utils.getimmediatesource(test_fn), expected)
def test_getnamespace_globals(self):
ns = inspect_utils.getnamespace(factory)
self.assertEqual(ns['free_function'], free_function)
def test_getnamespace_closure_with_undefined_var(self):
if False: # pylint:disable=using-constant-test
a = 1
def test_fn():
return a
ns = inspect_utils.getnamespace(test_fn)
self.assertNotIn('a', ns)
a = 2
ns = inspect_utils.getnamespace(test_fn)
self.assertEqual(ns['a'], 2)
def test_getnamespace_hermetic(self):
# Intentionally hiding the global function to make sure we don't overwrite
# it in the global namespace.
free_function = object() # pylint:disable=redefined-outer-name
def test_fn():
return free_function
ns = inspect_utils.getnamespace(test_fn)
globs = test_fn.__globals__
self.assertTrue(ns['free_function'] is free_function)
self.assertFalse(globs['free_function'] is free_function)
def test_getnamespace_locals(self):
def called_fn():
return 0
closed_over_list = []
closed_over_primitive = 1
def local_fn():
closed_over_list.append(1)
local_var = 1
return called_fn() + local_var + closed_over_primitive
ns = inspect_utils.getnamespace(local_fn)
self.assertEqual(ns['called_fn'], called_fn)
self.assertEqual(ns['closed_over_list'], closed_over_list)
self.assertEqual(ns['closed_over_primitive'], closed_over_primitive)
self.assertTrue('local_var' not in ns)
def test_getqualifiedname(self):
foo = object()
qux = types.ModuleType('quxmodule')
bar = types.ModuleType('barmodule')
baz = object()
bar.baz = baz
ns = {
'foo': foo,
'bar': bar,
'qux': qux,
}
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertEqual(inspect_utils.getqualifiedname(ns, foo), 'foo')
self.assertEqual(inspect_utils.getqualifiedname(ns, bar), 'bar')
self.assertEqual(inspect_utils.getqualifiedname(ns, baz), 'bar.baz')
def test_getqualifiedname_efficiency(self):
foo = object()
# We create a densely connected graph consisting of a relatively small
# number of modules and hide our symbol in one of them. The path to the
# symbol is at least 10, and each node has about 10 neighbors. However,
# by skipping visited modules, the search should take much less.
ns = {}
prev_level = []
for i in range(10):
current_level = []
for j in range(10):
mod_name = 'mod_{}_{}'.format(i, j)
mod = types.ModuleType(mod_name)
current_level.append(mod)
if i == 9 and j == 9:
mod.foo = foo
if prev_level:
# All modules at level i refer to all modules at level i+1
for prev in prev_level:
for mod in current_level:
prev.__dict__[mod.__name__] = mod
else:
for mod in current_level:
ns[mod.__name__] = mod
prev_level = current_level
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertIsNotNone(
inspect_utils.getqualifiedname(ns, foo, max_depth=10000000000))
def test_getqualifiedname_cycles(self):
foo = object()
# We create a graph of modules that contains circular references. The
# search process should avoid them. The searched object is hidden at the
# bottom of a path of length roughly 10.
ns = {}
mods = []
for i in range(10):
mod = types.ModuleType('mod_{}'.format(i))
if i == 9:
mod.foo = foo
# Module i refers to module i+1
if mods:
mods[-1].__dict__[mod.__name__] = mod
else:
ns[mod.__name__] = mod
# Module i refers to all modules j < i.
for prev in mods:
mod.__dict__[prev.__name__] = prev
mods.append(mod)
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertIsNotNone(
inspect_utils.getqualifiedname(ns, foo, max_depth=10000000000))
def test_getqualifiedname_finds_via_parent_module(self):
# TODO(mdan): This test is vulnerable to change in the lib module.
# A better way to forge modules should be found.
self.assertEqual(
inspect_utils.getqualifiedname(
lib.__dict__, lib.io.file_io.FileIO, max_depth=1),
'io.file_io.FileIO')
def test_getmethodclass(self):
self.assertEqual(
inspect_utils.getmethodclass(free_function), None)
self.assertEqual(
inspect_utils.getmethodclass(free_factory()), None)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.member_function),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.fn_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.wrap_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.static_method),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.class_method),
TestClass)
test_obj = TestClass()
self.assertEqual(
inspect_utils.getmethodclass(test_obj.member_function),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.fn_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.static_method),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.class_method),
TestClass)
def test_getmethodclass_locals(self):
def local_function():
pass
class LocalClass:
def member_function(self):
pass
@decorator
def decorated_member(self):
pass
@function_decorator()
def fn_decorated_member(self):
pass
@wrapping_decorator()
def wrap_decorated_member(self):
pass
self.assertEqual(
inspect_utils.getmethodclass(local_function), None)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.member_function),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.fn_decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.wrap_decorated_member),
LocalClass)
test_obj = LocalClass()
self.assertEqual(
inspect_utils.getmethodclass(test_obj.member_function),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.fn_decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
LocalClass)
def test_getmethodclass_callables(self):
class TestCallable:
def __call__(self):
pass
c = TestCallable()
self.assertEqual(inspect_utils.getmethodclass(c), TestCallable)
def test_getmethodclass_no_bool_conversion(self):
tensor = constant_op.constant([1])
self.assertEqual(
inspect_utils.getmethodclass(tensor.get_shape), type(tensor))
def test_getdefiningclass(self):
class Superclass:
def foo(self):
pass
def bar(self):
pass
@classmethod
def class_method(cls):
pass
class Subclass(Superclass):
def foo(self):
pass
def baz(self):
pass
self.assertIs(
inspect_utils.getdefiningclass(Subclass.foo, Subclass), Subclass)
self.assertIs(
inspect_utils.getdefiningclass(Subclass.bar, Subclass), Superclass)
self.assertIs(
inspect_utils.getdefiningclass(Subclass.baz, Subclass), Subclass)
self.assertIs(
inspect_utils.getdefiningclass(Subclass.class_method, Subclass),
Superclass)
def test_isbuiltin(self):
self.assertTrue(inspect_utils.isbuiltin(enumerate))
self.assertTrue(inspect_utils.isbuiltin(eval))
self.assertTrue(inspect_utils.isbuiltin(float))
self.assertTrue(inspect_utils.isbuiltin(int))
self.assertTrue(inspect_utils.isbuiltin(len))
self.assertTrue(inspect_utils.isbuiltin(range))
self.assertTrue(inspect_utils.isbuiltin(zip))
self.assertFalse(inspect_utils.isbuiltin(function_decorator))
def test_isconstructor(self):
class OrdinaryClass:
pass
class OrdinaryCallableClass:
def __call__(self):
pass
class Metaclass(type):
pass
class CallableMetaclass(type):
def __call__(cls):
pass
self.assertTrue(inspect_utils.isconstructor(OrdinaryClass))
self.assertTrue(inspect_utils.isconstructor(OrdinaryCallableClass))
self.assertTrue(inspect_utils.isconstructor(Metaclass))
self.assertTrue(inspect_utils.isconstructor(Metaclass('TestClass', (), {})))
self.assertTrue(inspect_utils.isconstructor(CallableMetaclass))
self.assertFalse(inspect_utils.isconstructor(
CallableMetaclass('TestClass', (), {})))
def test_isconstructor_abc_callable(self):
class AbcBase(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __call__(self):
pass
class AbcSubclass(AbcBase):
def __init__(self):
pass
def __call__(self):
pass
self.assertTrue(inspect_utils.isconstructor(AbcBase))
self.assertTrue(inspect_utils.isconstructor(AbcSubclass))
def test_getfutureimports_functions(self):
imps = inspect_utils.getfutureimports(basic_definitions.function_with_print)
self.assertNotIn('absolute_import', imps)
self.assertNotIn('division', imps)
self.assertNotIn('print_function', imps)
self.assertNotIn('generators', imps)
def test_getfutureimports_lambdas(self):
imps = inspect_utils.getfutureimports(basic_definitions.simple_lambda)
self.assertNotIn('absolute_import', imps)
self.assertNotIn('division', imps)
self.assertNotIn('print_function', imps)
self.assertNotIn('generators', imps)
def test_getfutureimports_methods(self):
imps = inspect_utils.getfutureimports(
basic_definitions.SimpleClass.method_with_print)
self.assertNotIn('absolute_import', imps)
self.assertNotIn('division', imps)
self.assertNotIn('print_function', imps)
self.assertNotIn('generators', imps)
if __name__ == '__main__':
test.main()
| InspectUtilsTest |
python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {
"start": 174622,
"end": 176457
} | class ____(
fixtures.TestBase, testing.AssertsCompiledSQL
):
__dialect__ = "default"
def _combinations(fn):
return testing.combinations(
desc,
asc,
nulls_first,
nulls_last,
any_,
all_,
distinct,
bitwise_not,
collate,
)(fn)
@_combinations
def test_move(self, operator):
m1 = column("q")
m2 = mock.Mock()
class MyCustomThing(roles.ByOfRole, SQLColumnExpression):
def __clause_element__(self):
return m1
@property
def comparator(self):
return Comparator()
def operate(
self,
op,
*other,
**kwargs,
):
return op(self.comparator, *other, **kwargs)
def reverse_operate(
self,
op,
*other,
**kwargs,
):
return op(other, self.comparator, **kwargs)
class Comparator(ColumnOperators):
def _operate(self, *arg, **kw):
return m2
setattr(Comparator, operator.__name__, Comparator._operate)
mc = MyCustomThing()
if operator is collate:
result = operator(mc, "some collation")
else:
result = operator(mc)
is_(result, m2)
@_combinations
def test_text(self, operator):
if operator is collate:
result = operator(text("foo"), "some collation")
else:
result = operator(text("foo"))
# Assert that the operation completed without crashing
# and returned a valid SQL expression
assert result is not None
| StandaloneOperatorTranslateTest |
python | pytorch__pytorch | torch/distributions/dirichlet.py | {
"start": 634,
"end": 1100
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, concentration):
x = torch._sample_dirichlet(concentration)
ctx.save_for_backward(x, concentration)
return x
@staticmethod
@once_differentiable
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
x, concentration = ctx.saved_tensors
return _Dirichlet_backward(x, concentration, grad_output)
| _Dirichlet |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image/base.py | {
"start": 376,
"end": 5312
} | class ____(BaseReader):
"""
Image parser.
Extract text from images using DONUT or pytesseract.
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
parse_text: bool = False,
text_type: str = "text",
pytesseract_model_kwargs: Dict[str, Any] = {},
):
"""Init parser."""
self._text_type = text_type
if parser_config is None and parse_text:
if text_type == "plain_text":
try:
import pytesseract
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageReader when text_type is 'plain_text': "
"`pip install pytesseract`"
)
processor = None
model = pytesseract
else:
try:
import sentencepiece # noqa
import torch # noqa
from PIL import Image # noqa
from transformers import DonutProcessor, VisionEncoderDecoderModel
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
processor = DonutProcessor.from_pretrained(
"naver-clova-ix/donut-base-finetuned-cord-v2"
)
model = VisionEncoderDecoderModel.from_pretrained(
"naver-clova-ix/donut-base-finetuned-cord-v2"
)
parser_config = {"processor": processor, "model": model}
self._parser_config = parser_config
self._keep_image = keep_image
self._parse_text = parse_text
self._pytesseract_model_kwargs = pytesseract_model_kwargs
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
if fs:
with fs.open(path=file) as f:
image = Image.open(BytesIO(f.read()))
else:
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
text_str: str = ""
if self._parse_text:
assert self._parser_config is not None
model = self._parser_config["model"]
processor = self._parser_config["processor"]
if processor:
device = infer_torch_device()
model.to(device)
# prepare decoder inputs
task_prompt = "<s_cord-v2>"
decoder_input_ids = processor.tokenizer(
task_prompt, add_special_tokens=False, return_tensors="pt"
).input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values
outputs = model.generate(
pixel_values.to(device),
decoder_input_ids=decoder_input_ids.to(device),
max_length=model.decoder.config.max_position_embeddings,
early_stopping=True,
pad_token_id=processor.tokenizer.pad_token_id,
eos_token_id=processor.tokenizer.eos_token_id,
use_cache=True,
num_beams=3,
bad_words_ids=[[processor.tokenizer.unk_token_id]],
return_dict_in_generate=True,
)
sequence = processor.batch_decode(outputs.sequences)[0]
sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(
processor.tokenizer.pad_token, ""
)
# remove first task start token
text_str = re.sub(r"<.*?>", "", sequence, count=1).strip()
else:
import pytesseract
model = cast(pytesseract, self._parser_config["model"])
text_str = model.image_to_string(
image, **self._pytesseract_model_kwargs
)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
| ImageReader |
python | gevent__gevent | src/gevent/monkey/_patch_thread_gte313.py | {
"start": 222,
"end": 4082
} | class ____(BasePatcher):
def patch_active_threads(self):
from gevent.threading import main_native_thread
for thread in self.threading_mod._active.values():
if thread == main_native_thread():
from gevent.thread import _ThreadHandle
from greenlet import getcurrent
thread._after_fork = lambda new_ident=None: new_ident
handle = _ThreadHandle()
handle._set_greenlet(getcurrent())
handle_attr = '_handle'
if hasattr(thread, '_os_thread_handle'):
handle_attr = '_os_thread_handle'
setattr(thread, handle_attr, handle)
thread._ident = handle.ident
assert thread.ident == getattr(thread, handle_attr).ident
continue
thread.join = self._make_existing_non_main_thread_join_func(thread,
None,
self.threading_mod)
def patch_threading_shutdown_on_main_thread_not_already_patched(self):
import greenlet
from .api import patch_item
main_thread = self.main_thread
threading_mod = self.threading_mod
orig_shutdown = self.orig_shutdown
_greenlet = main_thread._greenlet = greenlet.getcurrent()
handle_attr = '_handle'
if hasattr(main_thread, '_os_thread_handle'):
handle_attr = '_os_thread_handle'
def _shutdown():
# Release anyone trying to join() me,
# and let us switch to them.
getattr(main_thread, handle_attr)._set_done()
from gevent import sleep
try:
sleep()
except: # pylint:disable=bare-except
# A greenlet could have .kill() us
# or .throw() to us. I'm the main greenlet,
# there's no where else for this to go.
from gevent import get_hub
get_hub().print_exception(_greenlet, *sys.exc_info())
# Now, this may have resulted in us getting stopped
# if some other greenlet actually just ran there.
# That's not good, we're not supposed to be stopped
# when we enter _shutdown.
class FakeHandle:
def is_done(self):
return False
def _set_done(self):
return
def join(self):
return
setattr(main_thread, handle_attr, FakeHandle())
assert main_thread.is_alive()
# main_thread._is_stopped = False
# main_thread._tstate_lock = main_thread.__real_tstate_lock
# main_thread.__real_tstate_lock = None
# The only truly blocking native shutdown lock to
# acquire should be our own (hopefully), and the call to
# _stop that orig_shutdown makes will discard it.
# XXX: What if more get spawned?
for t in list(threading_mod.enumerate()):
if t.daemon or t is main_thread:
continue
while t.is_alive():
# 3.13.3 and >= 3.13.4 name this different
handle = getattr(t, handle_attr)
try:
handle.join(0.001)
except RuntimeError:
# Joining ourself.
handle._set_done()
break
try:
orig_shutdown()
except LoopExit: # pragma: no cover
pass
patch_item(threading_mod, '_shutdown', self.orig_shutdown)
patch_item(self.threading_mod, '_shutdown', _shutdown)
| Patcher |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-path-with-alternating-directions-i.py | {
"start": 36,
"end": 303
} | class ____(object):
def minCost(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
if (m, n) == (1, 1):
return 1
if (m, n) in ((1, 2), (2, 1)):
return 3
return -1
| Solution |
python | encode__httpx | httpx/_urls.py | {
"start": 14147,
"end": 21515
} | class ____(typing.Mapping[str, str]):
"""
URL query parameters, as a multi-dict.
"""
def __init__(self, *args: QueryParamTypes | None, **kwargs: typing.Any) -> None:
assert len(args) < 2, "Too many arguments."
assert not (args and kwargs), "Cannot mix named and unnamed arguments."
value = args[0] if args else kwargs
if value is None or isinstance(value, (str, bytes)):
value = value.decode("ascii") if isinstance(value, bytes) else value
self._dict = parse_qs(value, keep_blank_values=True)
elif isinstance(value, QueryParams):
self._dict = {k: list(v) for k, v in value._dict.items()}
else:
dict_value: dict[typing.Any, list[typing.Any]] = {}
if isinstance(value, (list, tuple)):
# Convert list inputs like:
# [("a", "123"), ("a", "456"), ("b", "789")]
# To a dict representation, like:
# {"a": ["123", "456"], "b": ["789"]}
for item in value:
dict_value.setdefault(item[0], []).append(item[1])
else:
# Convert dict inputs like:
# {"a": "123", "b": ["456", "789"]}
# To dict inputs where values are always lists, like:
# {"a": ["123"], "b": ["456", "789"]}
dict_value = {
k: list(v) if isinstance(v, (list, tuple)) else [v]
for k, v in value.items()
}
# Ensure that keys and values are neatly coerced to strings.
# We coerce values `True` and `False` to JSON-like "true" and "false"
# representations, and coerce `None` values to the empty string.
self._dict = {
str(k): [primitive_value_to_str(item) for item in v]
for k, v in dict_value.items()
}
def keys(self) -> typing.KeysView[str]:
"""
Return all the keys in the query params.
Usage:
q = httpx.QueryParams("a=123&a=456&b=789")
assert list(q.keys()) == ["a", "b"]
"""
return self._dict.keys()
def values(self) -> typing.ValuesView[str]:
"""
Return all the values in the query params. If a key occurs more than once
only the first item for that key is returned.
Usage:
q = httpx.QueryParams("a=123&a=456&b=789")
assert list(q.values()) == ["123", "789"]
"""
return {k: v[0] for k, v in self._dict.items()}.values()
def items(self) -> typing.ItemsView[str, str]:
"""
Return all items in the query params. If a key occurs more than once
only the first item for that key is returned.
Usage:
q = httpx.QueryParams("a=123&a=456&b=789")
assert list(q.items()) == [("a", "123"), ("b", "789")]
"""
return {k: v[0] for k, v in self._dict.items()}.items()
def multi_items(self) -> list[tuple[str, str]]:
"""
Return all items in the query params. Allow duplicate keys to occur.
Usage:
q = httpx.QueryParams("a=123&a=456&b=789")
assert list(q.multi_items()) == [("a", "123"), ("a", "456"), ("b", "789")]
"""
multi_items: list[tuple[str, str]] = []
for k, v in self._dict.items():
multi_items.extend([(k, i) for i in v])
return multi_items
def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any:
    """
    Get a value from the query param for a given key. If the key occurs
    more than once, then only the first value is returned.

    Usage:

    q = httpx.QueryParams("a=123&a=456&b=789")
    assert q.get("a") == "123"
    """
    # Keys are always stored as strings; coerce before looking up. The
    # previous code tested `key in self._dict` but indexed with `str(key)`,
    # which made non-string keys (e.g. q.get(1)) silently miss, unlike
    # `get_list`, which already coerces with str(key).
    values = self._dict.get(str(key))
    if values is not None:
        return values[0]
    return default
def get_list(self, key: str) -> list[str]:
    """
    Get all values from the query param for a given key.

    Usage:

    q = httpx.QueryParams("a=123&a=456&b=789")
    assert q.get_list("a") == ["123", "456"]
    """
    # Return a copy so callers cannot mutate the internal storage.
    return list(self._dict.get(str(key), []))
def set(self, key: str, value: typing.Any = None) -> QueryParams:
    """
    Return a new QueryParams instance, setting the value of a key.

    Usage:

    q = httpx.QueryParams("a=123")
    q = q.set("a", "456")
    assert q == httpx.QueryParams("a=456")
    """
    # QueryParams is immutable, so build and return a modified copy.
    q = QueryParams()
    q._dict = dict(self._dict)
    q._dict[str(key)] = [primitive_value_to_str(value)]
    return q
def add(self, key: str, value: typing.Any = None) -> QueryParams:
    """
    Return a new QueryParams instance, setting or appending the value of a key.

    Usage:

    q = httpx.QueryParams("a=123")
    q = q.add("a", "456")
    assert q == httpx.QueryParams("a=123&a=456")
    """
    # Copy-on-write: append the new value to the existing list for `key`.
    q = QueryParams()
    q._dict = dict(self._dict)
    q._dict[str(key)] = q.get_list(key) + [primitive_value_to_str(value)]
    return q
def remove(self, key: str) -> QueryParams:
    """
    Return a new QueryParams instance, removing the value of a key.

    Usage:

    q = httpx.QueryParams("a=123")
    q = q.remove("a")
    assert q == httpx.QueryParams("")
    """
    # Removing an absent key is a no-op (pop with default).
    q = QueryParams()
    q._dict = dict(self._dict)
    q._dict.pop(str(key), None)
    return q
def merge(self, params: QueryParamTypes | None = None) -> QueryParams:
    """
    Return a new QueryParams instance, updated with.

    Usage:

    q = httpx.QueryParams("a=123")
    q = q.merge({"b": "456"})
    assert q == httpx.QueryParams("a=123&b=456")

    q = httpx.QueryParams("a=123")
    q = q.merge({"a": "456", "b": "789"})
    assert q == httpx.QueryParams("a=456&b=789")
    """
    # Values from `params` win over existing values on key collision.
    q = QueryParams(params)
    q._dict = {**self._dict, **q._dict}
    return q
def __getitem__(self, key: typing.Any) -> str:
    """Return the first value for `key`; raise KeyError if absent."""
    return self._dict[key][0]
def __contains__(self, key: typing.Any) -> bool:
    """Membership test against the stored (string) parameter names."""
    return key in self._dict
def __iter__(self) -> typing.Iterator[typing.Any]:
    """Iterate over distinct parameter names."""
    return iter(self.keys())
def __len__(self) -> int:
    """Number of distinct parameter names (not total values)."""
    return len(self._dict)
def __bool__(self) -> bool:
    """True if any parameters are present."""
    return bool(self._dict)
def __hash__(self) -> int:
    """Hash the URL-encoded string form (consistent with __str__)."""
    return hash(str(self))
def __eq__(self, other: typing.Any) -> bool:
    """Order-insensitive equality over all (key, value) pairs."""
    if not isinstance(other, self.__class__):
        return False
    # Sort so that "a=1&b=2" equals "b=2&a=1".
    return sorted(self.multi_items()) == sorted(other.multi_items())
def __str__(self) -> str:
    """URL-encode all (key, value) pairs, duplicates included."""
    return urlencode(self.multi_items())
def __repr__(self) -> str:
    """Debug form, e.g. QueryParams('a=1&b=2')."""
    class_name = self.__class__.__name__
    query_string = str(self)
    return f"{class_name}({query_string!r})"
def update(self, params: QueryParamTypes | None = None) -> None:
    """Disabled mutation API: QueryParams is immutable; always raises."""
    raise RuntimeError(
        "QueryParams are immutable since 0.18.0. "
        "Use `q = q.merge(...)` to create an updated copy."
    )
def __setitem__(self, key: str, value: str) -> None:
    """Disabled mutation API: QueryParams is immutable; always raises."""
    raise RuntimeError(
        "QueryParams are immutable since 0.18.0. "
        "Use `q = q.set(key, value)` to create an updated copy."
    )
| QueryParams |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/config_schema.py | {
"start": 1644,
"end": 4468
} | class ____(DagsterTypeLoader):
def __init__(self, config_type, func, required_resource_keys):
    """Wrap a user loader function together with its config schema.

    Args:
        config_type: Resolved ConfigType describing the accepted config.
        func: The callable that maps (context, config_value) to a runtime value.
        required_resource_keys: Optional set of resource key strings.
    """
    # Validate all arguments eagerly via dagster's `check` helpers.
    self._config_type = check.inst_param(config_type, "config_type", ConfigType)
    self._func = check.callable_param(func, "func")
    self._required_resource_keys = check.opt_set_param(
        required_resource_keys, "required_resource_keys", of_type=str
    )
@property
def schema_type(self) -> ConfigType:
    """The config type this loader accepts."""
    return self._config_type
def construct_from_config_value(
    self, context: "DagsterTypeLoaderContext", config_value: object
):
    """Invoke the wrapped loader function on a validated config value."""
    return self._func(context, config_value)
def required_resource_keys(self):
    """Resource keys the wrapped loader requires, as an immutable frozenset."""
    return frozenset(self._required_resource_keys)
def _create_type_loader_for_decorator(
    config_type: ConfigType, func, required_resource_keys: Optional[AbstractSet[str]]
):
    # Internal factory used by the @dagster_type_loader decorator below.
    return DagsterTypeLoaderFromDecorator(config_type, func, required_resource_keys)
# Signature of a loader function: (context, parsed_config_value) -> runtime value.
DagsterTypeLoaderFn: TypeAlias = Callable[["DagsterTypeLoaderContext", Any], Any]


@public
def dagster_type_loader(
    config_schema: object, required_resource_keys: Optional[AbstractSet[str]] = None
) -> Callable[[DagsterTypeLoaderFn], DagsterTypeLoaderFromDecorator]:
    """Create a dagster type loader that maps config data to a runtime value.

    The decorated function should take the execution context and parsed config value and return the
    appropriate runtime value.

    Args:
        config_schema (ConfigSchema): The schema for the config that's passed to the decorated
            function.
        required_resource_keys (Optional[AbstractSet[str]]): Resource keys the loader needs.

    Raises:
        DagsterInvalidDefinitionError: If the decorated function does not accept a first
            positional parameter named ``context``.

    Examples:
        .. code-block:: python

            @dagster_type_loader(Permissive())
            def load_dict(_context, value):
                return value
    """
    # Local import — presumably avoids a circular dependency; TODO confirm.
    from dagster._config import resolve_to_config_type

    config_type = resolve_to_config_type(config_schema)
    # Internal invariant: resolve_to_config_type must yield a ConfigType here.
    assert isinstance(config_type, ConfigType), (
        f"{config_schema} could not be resolved to config type"
    )
    # The loader must take `context` positionally; everything else is keyword-only.
    EXPECTED_POSITIONALS = ["context", "*"]

    def wrapper(func: DagsterTypeLoaderFn) -> DagsterTypeLoaderFromDecorator:
        params = get_function_params(func)
        missing_positional = validate_expected_params(params, EXPECTED_POSITIONALS)
        if missing_positional:
            raise DagsterInvalidDefinitionError(
                f"@dagster_type_loader '{func.__name__}' decorated function does not have required"
                f" positional parameter '{missing_positional}'. @dagster_type_loader decorated"
                " functions should only have keyword arguments that match input names and a first"
                " positional parameter named 'context'."
            )
        return _create_type_loader_for_decorator(
            config_type, func, required_resource_keys
        )

    return wrapper
| DagsterTypeLoaderFromDecorator |
python | Textualize__textual | docs/examples/how-to/layout02.py | {
"start": 491,
"end": 650
} | class ____(App):
def on_ready(self) -> None:
    """Once the app is ready, show the tweet timeline screen."""
    self.push_screen(TweetScreen())
# Run the demo app only when this file is executed as a script.
if __name__ == "__main__":
    app = LayoutApp()
    app.run()
| LayoutApp |
python | doocs__leetcode | solution/1800-1899/1895.Largest Magic Square/Solution.py | {
"start": 0,
"end": 1578
} | class ____:
def largestMagicSquare(self, grid: List[List[int]]) -> int:
    """Return the side length of the largest magic square contained in grid.

    A k x k subgrid is magic when every row, every column, and both
    diagonals sum to the same value. Uses row/column prefix sums so each
    candidate square is checked in O(k) per row/column.
    """
    m, n = len(grid), len(grid[0])
    # Prefix sums, 1-indexed: rowsum[i][j] = sum of grid[i-1][0:j],
    # colsum[i][j] = sum of column j-1 over rows 0:i.
    rowsum = [[0] * (n + 1) for _ in range(m + 1)]
    colsum = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            rowsum[i][j] = rowsum[i][j - 1] + grid[i - 1][j - 1]
            colsum[i][j] = colsum[i - 1][j] + grid[i - 1][j - 1]

    def check(x1, y1, x2, y2):
        # Target sum: the first row of the candidate square.
        val = rowsum[x1 + 1][y2 + 1] - rowsum[x1 + 1][y1]
        # Remaining rows must match.
        for i in range(x1 + 1, x2 + 1):
            if rowsum[i + 1][y2 + 1] - rowsum[i + 1][y1] != val:
                return False
        # All columns must match.
        for j in range(y1, y2 + 1):
            if colsum[x2 + 1][j + 1] - colsum[x1][j + 1] != val:
                return False
        side = x2 - x1 + 1
        # Main diagonal and anti-diagonal.
        if sum(grid[x1 + d][y1 + d] for d in range(side)) != val:
            return False
        if sum(grid[x1 + d][y2 - d] for d in range(side)) != val:
            return False
        return True

    # Try sizes from largest to smallest; the first magic square found wins.
    for k in range(min(m, n), 1, -1):
        for i in range(m - k + 1):
            for j in range(n - k + 1):
                if check(i, j, i + k - 1, j + k - 1):
                    return k
    # Every single cell is trivially a 1x1 magic square.
    return 1
| Solution |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 5358,
"end": 5699
} | class ____(InvalidConfigError):
def __init__(self, message, missing_config_variable=None) -> None:
    """Record the error message and the unresolved config variable(s).

    Args:
        message: Human-readable description of the failure.
        missing_config_variable: The config substitution variable(s) that
            could not be resolved; normalized to a list (empty if falsy).
    """
    if not missing_config_variable:
        missing_config_variable = []
    self.message = message
    self.missing_config_variable = missing_config_variable
    super().__init__(self.message)
| MissingConfigVariableError |
python | numba__numba | numba/core/errors.py | {
"start": 18087,
"end": 18451
} | class ____(IRError):
"""
An undefined variable is encountered during interpretation of IR.
"""
def __init__(self, name, loc=None):
    """Record the undefined variable and compose the user-facing message.

    Args:
        name: Name of the variable the bytecode analysis could not define.
        loc: Optional source location attached to the IR error.
    """
    self.name = name
    msg = ("The compiler failed to analyze the bytecode. "
           "Variable '%s' is not defined." % name)
    super(NotDefinedError, self).__init__(msg, loc=loc)
| NotDefinedError |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/GradientEditorItem.py | {
"start": 14399,
"end": 32717
} | class ____(TickSliderItem):
"""
**Bases:** :class:`TickSliderItem <pyqtgraph.TickSliderItem>`
An item that can be used to define a color gradient. Implements common pre-defined gradients that are
customizable by the user. :class: `GradientWidget <pyqtgraph.GradientWidget>` provides a widget
with a GradientEditorItem that can be added to a GUI.
================================ ===========================================================
**Signals:**
sigGradientChanged(self) Signal is emitted anytime the gradient changes. The signal
is emitted in real time while ticks are being dragged or
colors are being changed.
sigGradientChangeFinished(self) Signal is emitted when the gradient is finished changing.
================================ ===========================================================
"""
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, *args, **kargs):
"""
Create a new GradientEditorItem.
All arguments are passed to :func:`TickSliderItem.__init__ <pyqtgraph.TickSliderItem.__init__>`
=============== =================================================================================
**Arguments:**
orientation Set the orientation of the gradient. Options are: 'left', 'right'
'top', and 'bottom'.
allowAdd Default is True. Specifies whether ticks can be added to the item.
tickPen Default is white. Specifies the color of the outline of the ticks.
Can be any of the valid arguments for :func:`mkPen <pyqtgraph.mkPen>`
=============== =================================================================================
"""
self.currentTick = None
self.currentTickColor = None
self.rectSize = 15
# Two stacked rectangles: a checkerboard background (shows through alpha)
# and the gradient-filled rectangle drawn on top of it.
self.gradRect = QtWidgets.QGraphicsRectItem(QtCore.QRectF(0, self.rectSize, 100, self.rectSize))
self.backgroundRect = QtWidgets.QGraphicsRectItem(QtCore.QRectF(0, -self.rectSize, 100, self.rectSize))
self.backgroundRect.setBrush(QtGui.QBrush(QtCore.Qt.BrushStyle.DiagCrossPattern))
self.colorMode = 'rgb'
TickSliderItem.__init__(self, *args, **kargs)
self.colorDialog = QtWidgets.QColorDialog()
self.colorDialog.setOption(QtWidgets.QColorDialog.ColorDialogOption.ShowAlphaChannel, True)
self.colorDialog.setOption(QtWidgets.QColorDialog.ColorDialogOption.DontUseNativeDialog, True)
self.colorDialog.currentColorChanged.connect(self.currentColorChanged)
self.colorDialog.rejected.connect(self.currentColorRejected)
self.colorDialog.accepted.connect(self.currentColorAccepted)
self.backgroundRect.setParentItem(self)
self.gradRect.setParentItem(self)
self.setMaxDim(self.rectSize + self.tickSize)
# NOTE(review): "GradiantEditorItem" is misspelled but kept as-is — it is
# the existing translation context key for these menu labels.
self.rgbAction = QtGui.QAction(translate("GradiantEditorItem", 'RGB'), self)
self.rgbAction.setCheckable(True)
self.rgbAction.triggered.connect(self._setColorModeToRGB)
self.hsvAction = QtGui.QAction(translate("GradiantEditorItem", 'HSV'), self)
self.hsvAction.setCheckable(True)
self.hsvAction.triggered.connect(self._setColorModeToHSV)
self.menu = ColorMapMenu(showGradientSubMenu=True, showColorMapSubMenus=True)
self.menu.sigColorMapTriggered.connect(self.colorMapMenuClicked)
self.menu.addSeparator()
self.menu.addAction(self.rgbAction)
self.menu.addAction(self.hsvAction)
# Start from a clean two-tick black->red gradient.
for t in list(self.ticks.keys()):
self.removeTick(t)
self.addTick(0, QtGui.QColor(0,0,0), True)
self.addTick(1, QtGui.QColor(255,0,0), True)
self.setColorMode('rgb')
self.updateGradient()
self.linkedGradients = {}
self.sigTicksChanged.connect(self._updateGradientIgnoreArgs)
self.sigTicksChangeFinished.connect(self.sigGradientChangeFinished)
# Show or hide all ticks; while hidden, tick creation is blocked and the
# previous allowAdd flag is stashed in _allowAdd_backup for restoration.
def showTicks(self, show=True):
for tick in self.ticks.keys():
if show:
tick.show()
orig = getattr(self, '_allowAdd_backup', None)
if orig:
self.allowAdd = orig
else:
self._allowAdd_backup = self.allowAdd
self.allowAdd = False #block tick creation
tick.hide()
def setOrientation(self, orientation):
## public
"""
Set the orientation of the GradientEditorItem.
============== ===================================================================
**Arguments:**
orientation Options are: 'left', 'right', 'top', 'bottom'
The orientation option specifies which side of the gradient the
ticks are on, as well as whether the gradient is vertical ('right'
and 'left') or horizontal ('top' and 'bottom').
============== ===================================================================
"""
TickSliderItem.setOrientation(self, orientation)
# Shift by rectSize so the gradient rectangle sits beside the tick row.
tr = QtGui.QTransform.fromTranslate(0, self.rectSize)
self.setTransform(tr, True)
def showMenu(self, ev):
#private
self.menu.popup(ev.screenPos().toQPoint())
@QtCore.Slot(object)
def colorMapMenuClicked(self, cmap):
#private
# Preset gradients are encoded as "preset-gradient:<name>" map names.
if cmap.name.startswith("preset-gradient:"):
name = cmap.name.split(":")[1]
self.loadPreset(name)
else:
self.setColorMap(cmap)
self.showTicks(False)
@addGradientListToDocstring()
def loadPreset(self, name):
"""
Load a predefined gradient. Currently defined gradients are:
"""## TODO: provide image with names of defined gradients
#global Gradients
self.restoreState(Gradients[name])
def setColorMode(self, cm):
"""
Set the color mode for the gradient. Options are: 'hsv', 'rgb'
"""
## public
if cm not in ['rgb', 'hsv']:
raise Exception("Unknown color mode %s. Options are 'rgb' and 'hsv'." % str(cm))
# Block signals while syncing the checkable menu actions so toggling
# them programmatically does not re-enter setColorMode.
try:
self.rgbAction.blockSignals(True)
self.hsvAction.blockSignals(True)
self.rgbAction.setChecked(cm == 'rgb')
self.hsvAction.setChecked(cm == 'hsv')
finally:
self.rgbAction.blockSignals(False)
self.hsvAction.blockSignals(False)
self.colorMode = cm
self.sigTicksChanged.emit(self)
self.sigGradientChangeFinished.emit(self)
# Qt slot wrappers so the RGB/HSV menu actions can switch the color mode.
@QtCore.Slot()
def _setColorModeToRGB(self):
self.setColorMode("rgb")
@QtCore.Slot()
def _setColorModeToHSV(self):
self.setColorMode("hsv")
def colorMap(self):
"""Return a ColorMap object representing the current state of the editor."""
if self.colorMode == 'hsv':
raise NotImplementedError('hsv colormaps not yet supported')
pos = []
color = []
for t,x in self.listTicks():
pos.append(x)
c = t.color
color.append(c.getRgb())
return ColorMap(np.array(pos), np.array(color, dtype=np.ubyte))
# Rebuild the QLinearGradient, repaint the gradient rect, notify listeners.
def updateGradient(self):
#private
self.gradient = self.getGradient()
self.gradRect.setBrush(QtGui.QBrush(self.gradient))
self.sigGradientChanged.emit(self)
# Signal adapter: drops the emitted arguments before calling updateGradient().
@QtCore.Slot(object)
def _updateGradientIgnoreArgs(self, *args, **kwargs):
self.updateGradient()
def setLength(self, newLen):
#private (but maybe public)
TickSliderItem.setLength(self, newLen)
self.backgroundRect.setRect(1, -self.rectSize, newLen, self.rectSize)
self.gradRect.setRect(1, -self.rectSize, newLen, self.rectSize)
self.sigTicksChanged.emit(self)
@QtCore.Slot(QtGui.QColor)
def currentColorChanged(self, color):
#private
if color.isValid() and self.currentTick is not None:
self.setTickColor(self.currentTick, color)
@QtCore.Slot()
def currentColorRejected(self):
#private
# Dialog cancelled: restore the color the tick had before editing.
self.setTickColor(self.currentTick, self.currentTickColor)
@QtCore.Slot()
def currentColorAccepted(self):
self.sigGradientChangeFinished.emit(self)
@QtCore.Slot(object, object)
def tickClicked(self, tick, ev):
#private
# Left click edits the tick color; right click opens the tick menu.
if ev.button() == QtCore.Qt.MouseButton.LeftButton:
self.raiseColorDialog(tick)
elif ev.button() == QtCore.Qt.MouseButton.RightButton:
self.raiseTickContextMenu(tick, ev)
# Open the color dialog for a tick, if that tick permits color changes.
def raiseColorDialog(self, tick):
if not tick.colorChangeAllowed:
return
self.currentTick = tick
self.currentTickColor = tick.color
self.colorDialog.setCurrentColor(tick.color)
self.colorDialog.open()
# Show the per-tick context menu at the cursor position.
def raiseTickContextMenu(self, tick, ev):
self.tickMenu = TickMenu(tick, self)
self.tickMenu.popup(ev.screenPos().toQPoint())
# A tick drag has ended; notify listeners that the gradient change is done.
def tickMoveFinished(self, tick):
self.sigGradientChangeFinished.emit(self)
def getGradient(self):
"""Return a QLinearGradient object."""
g = QtGui.QLinearGradient(QtCore.QPointF(0,0), QtCore.QPointF(self.length,0))
if self.colorMode == 'rgb':
ticks = self.listTicks()
g.setStops([(x, QtGui.QColor(t.color)) for t,x in ticks])
elif self.colorMode == 'hsv': ## HSV mode is approximated for display by interpolating 10 points between each stop
ticks = self.listTicks()
stops = []
stops.append((ticks[0][1], ticks[0][0].color))
for i in range(1,len(ticks)):
x1 = ticks[i-1][1]
x2 = ticks[i][1]
dx = (x2-x1) / 10.
for j in range(1,10):
x = x1 + dx*j
stops.append((x, self.getColor(x)))
stops.append((x2, self.getColor(x2)))
g.setStops(stops)
return g
def getColor(self, x, toQColor=True):
"""
Return a color for a given value.
============== ==================================================================
**Arguments:**
x Value (position on gradient) of requested color.
toQColor If true, returns a QColor object, else returns a (r,g,b,a) tuple.
============== ==================================================================
"""
ticks = self.listTicks()
# Clamp positions outside the first/last tick to the end colors.
if x <= ticks[0][1]:
c = ticks[0][0].color
if toQColor:
return QtGui.QColor(c) # always copy colors before handing them out
else:
return c.getRgb()
if x >= ticks[-1][1]:
c = ticks[-1][0].color
if toQColor:
return QtGui.QColor(c) # always copy colors before handing them out
else:
return c.getRgb()
# Find the pair of ticks bracketing x, then interpolate between them.
x2 = ticks[0][1]
for i in range(1,len(ticks)):
x1 = x2
x2 = ticks[i][1]
if x1 <= x and x2 >= x:
break
dx = (x2-x1)
if dx == 0:
f = 0.
else:
f = (x-x1) / dx
c1 = ticks[i-1][0].color
c2 = ticks[i][0].color
if self.colorMode == 'rgb':
r = c1.red() * (1.-f) + c2.red() * f
g = c1.green() * (1.-f) + c2.green() * f
b = c1.blue() * (1.-f) + c2.blue() * f
a = c1.alpha() * (1.-f) + c2.alpha() * f
if toQColor:
return QtGui.QColor(int(r), int(g), int(b), int(a))
else:
return (r,g,b,a)
elif self.colorMode == 'hsv':
h1,s1,v1,_ = c1.getHsv()
h2,s2,v2,_ = c2.getHsv()
h = h1 * (1.-f) + h2 * f
s = s1 * (1.-f) + s2 * f
v = v1 * (1.-f) + v2 * f
c = QtGui.QColor.fromHsv(int(h), int(s), int(v))
if toQColor:
return c
else:
return c.getRgb()
def getLookupTable(self, nPts, alpha=None):
"""
Return an RGB(A) lookup table (ndarray).
============== ============================================================================
**Arguments:**
nPts The number of points in the returned lookup table.
alpha True, False, or None - Specifies whether or not alpha values are included
in the table.If alpha is None, alpha will be automatically determined.
============== ============================================================================
"""
if alpha is None:
alpha = self.usesAlpha()
if alpha:
table = np.empty((nPts,4), dtype=np.ubyte)
else:
table = np.empty((nPts,3), dtype=np.ubyte)
for i in range(nPts):
x = float(i)/(nPts-1)
color = self.getColor(x, toQColor=False)
table[i] = color[:table.shape[1]]
return table
def usesAlpha(self):
"""Return True if any ticks have an alpha < 255"""
ticks = self.listTicks()
for t in ticks:
if t[0].color.alpha() < 255:
return True
return False
def isLookupTrivial(self):
"""Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0"""
ticks = self.listTicks()
if len(ticks) != 2:
return False
if ticks[0][1] != 0.0 or ticks[1][1] != 1.0:
return False
c1 = ticks[0][0].color.getRgb()
c2 = ticks[1][0].color.getRgb()
if c1 != (0,0,0,255) or c2 != (255,255,255,255):
return False
return True
def addTick(self, x, color=None, movable=True, finish=True):
"""
Add a tick to the gradient. Return the tick.
============== ==================================================================
**Arguments:**
x Position where tick should be added.
color Color of added tick. If color is not specified, the color will be
the color of the gradient at the specified position.
movable Specifies whether the tick is movable with the mouse.
============== ==================================================================
"""
if color is None:
color = self.getColor(x)
t = TickSliderItem.addTick(self, x, color=color, movable=movable, finish=finish)
t.colorChangeAllowed = True
return t
def saveState(self):
"""
Return a dictionary with parameters for rebuilding the gradient. Keys will include:
- 'mode': hsv or rgb
- 'ticks': a list of tuples (pos, (r,g,b,a))
"""
## public
ticks = []
for t in self.ticks:
c = t.color
ticks.append((self.ticks[t], c.getRgb()))
state = {'mode': self.colorMode,
'ticks': ticks,
'ticksVisible': next(iter(self.ticks)).isVisible()}
return state
def restoreState(self, state):
"""
Restore the gradient specified in state.
============== ====================================================================
**Arguments:**
state A dictionary with same structure as those returned by
:func:`saveState <pyqtgraph.GradientEditorItem.saveState>`
Keys must include:
- 'mode': hsv or rgb
- 'ticks': a list of tuples (pos, (r,g,b,a))
============== ====================================================================
"""
## public
# Mass edit ticks without graphics update
signalsBlocked = self.blockSignals(True)
self.setColorMode(state['mode'])
for t in list(self.ticks.keys()):
self.removeTick(t, finish=False)
for t in state['ticks']:
c = QtGui.QColor(*t[1])
self.addTick(t[0], c, finish=False)
self.showTicks( state.get('ticksVisible',
next(iter(self.ticks)).isVisible()) )
# Close with graphics update
self.blockSignals(signalsBlocked)
self.sigTicksChanged.emit(self)
self.sigGradientChangeFinished.emit(self)
# Replace all ticks with the stops of the given ColorMap (forces 'rgb' mode).
def setColorMap(self, cm):
# Mass edit ticks without graphics update
signalsBlocked = self.blockSignals(True)
self.setColorMode('rgb')
for t in list(self.ticks.keys()):
self.removeTick(t, finish=False)
colors = cm.getColors(mode='qcolor')
for i in range(len(cm.pos)):
x = cm.pos[i]
c = colors[i]
self.addTick(x, c, finish=False)
# Close with graphics update
self.blockSignals(signalsBlocked)
self.sigTicksChanged.emit(self)
self.sigGradientChangeFinished.emit(self)
# Keep slaveGradient's state in sync with this editor; connect=False unlinks.
def linkGradient(self, slaveGradient, connect=True):
if connect:
fn = lambda g, slave=slaveGradient:slave.restoreState(
g.saveState())
self.linkedGradients[id(slaveGradient)] = fn
self.sigGradientChanged.connect(fn)
self.sigGradientChanged.emit(self)
else:
fn = self.linkedGradients.get(id(slaveGradient), None)
if fn:
self.sigGradientChanged.disconnect(fn)
| GradientEditorItem |
python | huggingface__transformers | src/transformers/models/fuyu/processing_fuyu.py | {
"start": 14009,
"end": 36433
} | class ____(ProcessorMixin):
r"""
Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor.
[`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information.
Args:
image_processor ([`FuyuImageProcessor`]):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`]):
The tokenizer is a required input.
"""
def __init__(self, image_processor, tokenizer, **kwargs):
super().__init__(image_processor=image_processor, tokenizer=tokenizer)
self.image_processor = image_processor
self.tokenizer = tokenizer
self.max_tokens_to_generate = 10
self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it?
self.pad_token_id = 0
self.dummy_image_index = -1
self.image_token_id = tokenizer.encode("|SPEAKER|", add_special_tokens=False)[1]
self.image_newline_id = tokenizer.encode("|NEWLINE|", add_special_tokens=False)[1]
def _left_pad_inputs_with_attention_mask(self, model_inputs: list[dict], return_attention_mask: bool):
max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs)
max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs)
batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []}
for entry in model_inputs:
for key, tensor in entry.items():
if key == "input_ids":
num_padding_tokens = max_length_input_ids - tensor.shape[1]
padded_input_ids = torch.cat(
[
torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long),
tensor,
],
dim=1,
)
batched_inputs[key].append(padded_input_ids)
attention_mask = torch.cat(
[torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)],
dim=1,
)
batched_inputs["attention_mask"].append(attention_mask)
elif key == "image_patches":
# For image_patches, we don't pad but just append them to the list.
batched_inputs[key].append(tensor)
else: # for image_patches_indices
num_padding_indices = max_length_image_patch_indices - tensor.shape[1]
padded_indices = torch.cat(
[
torch.full(
(tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long
),
tensor,
],
dim=1,
)
batched_inputs[key].append(padded_indices)
batched_keys = ["input_ids", "image_patches_indices"]
if return_attention_mask:
batched_keys.append("attention_mask")
for key in batched_keys:
batched_inputs[key] = torch.cat(batched_inputs[key], dim=0)
# Cast images to tensor as well, if only one image passed and no padding needed
# NOTE: vLLM expects all processor outputs to be a tensor
if len(batched_inputs["image_patches"]) == 1:
batched_inputs["image_patches"] = torch.cat(batched_inputs["image_patches"], dim=0)
return batched_inputs
def get_sample_encoding(
self,
prompts,
scale_factors,
image_unpadded_heights,
image_unpadded_widths,
image_placeholder_id,
image_newline_id,
tensor_batch_images,
):
image_present = torch.ones(1, 1, 1)
model_image_input = self.image_processor.preprocess_with_tokenizer_info(
image_input=tensor_batch_images,
image_present=image_present,
image_unpadded_h=image_unpadded_heights,
image_unpadded_w=image_unpadded_widths,
image_placeholder_id=image_placeholder_id,
image_newline_id=image_newline_id,
variable_sized=True,
)
# FIXME max_tokens_to_generate is embedded into this processor's call.
prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch(
tokenizer=self.tokenizer,
prompts=prompts,
scale_factors=scale_factors,
max_tokens_to_generate=self.max_tokens_to_generate,
max_position_embeddings=self.max_position_embeddings,
add_BOS=True,
add_beginning_of_answer_token=True,
)
image_padded_unpacked_tokens = construct_full_unpacked_stream(
num_real_text_tokens=prompts_length,
input_stream=prompt_tokens,
image_tokens=model_image_input["image_input_ids"],
batch_size=1,
num_sub_sequences=self.subsequence_length,
)
# Construct inputs for image patch indices.
unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(
num_real_text_tokens=prompts_length,
input_stream=torch.full_like(prompt_tokens, -1),
image_tokens=model_image_input["image_patch_indices_per_batch"],
batch_size=1,
num_sub_sequences=self.subsequence_length,
)
max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens)
max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings)
tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0]))
# Use same packing logic for the image patch indices.
image_patch_input_indices = full_unpacked_stream_to_tensor(
all_bi_tokens_to_place=[tokens_to_place],
full_unpacked_stream=unpacked_image_patch_indices_per_batch,
fill_value=-1,
batch_size=1,
new_seq_len=max_seq_len_batch,
offset=0,
)
image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
batch_encoding = {
"input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
"image_patches": image_patches_tensor,
"image_patches_indices": image_patch_input_indices,
}
return batch_encoding
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]] = None,
**kwargs: Unpack[FuyuProcessorKwargs],
) -> "FuyuBatchFeature":
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `list[PIL.Image.Image]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Returns:
[`FuyuBatchEncoding`]: A [`FuyuBatchEncoding`] with the following fields:
- **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
- **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
- **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
`return_attention_mask=True`.
"""
requires_backends(self, ["torch"])
# --- Check input validity ---
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be None.")
output_kwargs = self._merge_kwargs(
FuyuProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
if not output_kwargs["text_kwargs"].setdefault("return_attention_mask", True):
raise ValueError("`return_attention_mask=False` is not supported for this model.")
if text is not None and images is None:
logger.warning("You are processing a text with no associated image. Make sure it is intended.")
text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
return text_encoding
if text is None and images is not None:
logger.warning("You are processing an image with no associated text. Make sure it is intended.")
prompts = [[""]]
if text is not None and images is not None:
if isinstance(text, str):
prompts = [[text]]
elif isinstance(text, list):
prompts = [[text_seq] for text_seq in text]
# --- Preprocess images using self.image_processor ---
# FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
output_kwargs["images_kwargs"]["return_tensors"] = "pt"
image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"])
batch_images = image_encoding["images"]
image_unpadded_heights = image_encoding["image_unpadded_heights"]
image_unpadded_widths = image_encoding["image_unpadded_widths"]
scale_factors = image_encoding["image_scale_factors"]
self.subsequence_length = 1 # Each batch contains only one sequence.
self.batch_size = len(batch_images)
# --- Use self.tokenizer to get the ids of special tokens to insert into image ids ---
tensor_batch_images = torch.stack([img[0] for img in batch_images if img]).unsqueeze(1)
# --- Use self.image_processor again to obtain the full token ids and batch inputs ---
all_encodings = []
for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip(
prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images
):
sample_encoding = self.get_sample_encoding(
prompts=[prompt],
scale_factors=[scale_factor],
image_unpadded_heights=torch.tensor([image_unpadded_height]),
image_unpadded_widths=torch.tensor([image_unpadded_width]),
image_placeholder_id=self.image_token_id,
image_newline_id=self.image_newline_id,
tensor_batch_images=tensor_batch_image.unsqueeze(0),
)
all_encodings.append(sample_encoding)
batch_encoding = self._left_pad_inputs_with_attention_mask(
model_inputs=all_encodings, return_attention_mask=True
)
if return_mm_token_type_ids:
input_ids = batch_encoding["input_ids"]
mm_token_type_ids = torch.zeros_like(input_ids)
mm_token_type_ids[input_ids == self.image_token_id] = 1
mm_token_type_ids[input_ids == self.image_newline_id] = 1
batch_encoding["mm_token_type_ids"] = mm_token_type_ids
return FuyuBatchFeature(data=batch_encoding)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
size = kwargs.get("size") or self.image_processor.size
padded_height, padded_width = size["height"], size["width"]
num_image_tokens = []
num_image_patches = [1] * len(image_sizes)
for image_size in image_sizes:
height_scale_factor = padded_height / image_size[0]
width_scale_factor = padded_width / image_size[1]
optimal_scale_factor = min(height_scale_factor, width_scale_factor)
image_unpadded_h = min(int(image_size[0] * optimal_scale_factor), image_size[0])
image_unpadded_w = min(int(image_size[0] * optimal_scale_factor), image_size[0])
# We can use torch here because Fuyu processor has hard dependency on torch. NOTE: Fuyu can't do multi-image
# thus the below (1, 1, 1) is hardcoded. Same as when calling the processor
model_image_input = self.image_processor.preprocess_with_tokenizer_info(
image_input=torch.zeros(1, 1, 3, padded_height, padded_width),
image_present=torch.ones(1, 1, 1),
image_unpadded_h=torch.tensor([[image_unpadded_h]]),
image_unpadded_w=torch.tensor([[image_unpadded_w]]),
image_placeholder_id=0, # dummy ids, we can be sure `id=0` is never out-of-range
image_newline_id=0,
variable_sized=True,
)
num_image_tokens.append(model_image_input["image_input_ids"][0][0].shape[-1])
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
def post_process_box_coordinates(self, outputs, target_sizes=None):
"""
Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space.
Coordinates will be returned in "box" format, with the following pattern:
`<box>top, left, bottom, right</box>`
Point coordinates are not supported yet.
Args:
outputs ([`GenerateOutput`]):
Raw outputs from `generate`.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left
to None, coordinates will not be rescaled.
Returns:
`GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with
boxed and possible rescaled coordinates.
"""
def scale_factor_to_fit(original_size, target_size=None):
height, width = original_size
if target_size is None:
max_height = self.image_processor.size["height"]
max_width = self.image_processor.size["width"]
else:
max_height, max_width = target_size
if width <= max_width and height <= max_height:
return 1.0
return min(max_height / height, max_width / width)
def find_delimiters_pair(tokens, start_token, end_token):
start_id = self.tokenizer.convert_tokens_to_ids(start_token)
end_id = self.tokenizer.convert_tokens_to_ids(end_token)
starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0]
ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0]
if torch.any(starting_positions) and torch.any(ending_positions):
return (starting_positions[0], ending_positions[0])
return (None, None)
def tokens_to_boxes(tokens, original_size):
while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != (
None,
None,
):
start, end = pair
if end != start + 5:
continue
# Retrieve transformed coordinates from tokens
coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
# Scale back to original image size and multiply by 2
scale = scale_factor_to_fit(original_size)
top, left, bottom, right = [2 * int(float(c) / scale) for c in coords]
# Replace the IDs so they get detokenized right
replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}"
replacement = self.tokenizer.tokenize(replacement)[1:]
replacement = self.tokenizer.convert_tokens_to_ids(replacement)
replacement = torch.tensor(replacement).to(tokens)
tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
return tokens
def tokens_to_points(tokens, original_size):
while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != (
None,
None,
):
start, end = pair
if end != start + 3:
continue
# Retrieve transformed coordinates from tokens
coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
# Scale back to original image size and multiply by 2
scale = scale_factor_to_fit(original_size)
x, y = [2 * int(float(c) / scale) for c in coords]
# Replace the IDs so they get detokenized right
replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}"
replacement = self.tokenizer.tokenize(replacement)[1:]
replacement = self.tokenizer.convert_tokens_to_ids(replacement)
replacement = torch.tensor(replacement).to(tokens)
tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
return tokens
if target_sizes is None:
target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs)
elif target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
if len(outputs) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as output sequences")
results = []
for seq, size in zip(outputs, target_sizes):
seq = tokens_to_boxes(seq, size)
seq = tokens_to_points(seq, size)
results.append(seq)
return results
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
"""
Post-processes the output of `FuyuForConditionalGeneration` to only return the text output.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
containing the token ids of the generated sequences.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text output.
"""
beginning_of_answer = self.tokenizer.convert_tokens_to_ids(BEGINNING_OF_ANSWER_STRING)
# get boa index for each outputted sequence tensor
# start all generated sequences from the beginning of the answer token, pad to have consistent length
unpadded_output_sequences = [
seq[(seq == beginning_of_answer).nonzero(as_tuple=True)[0] + 1 :] for seq in generated_outputs
]
max_len = max(len(seq) for seq in unpadded_output_sequences)
# convert to torch and pad sequences
padded_output_sequences = torch.full((len(unpadded_output_sequences), max_len), self.pad_token_id)
for i, seq in enumerate(unpadded_output_sequences):
padded_output_sequences[i, : len(seq)] = torch.tensor(seq)
return self.batch_decode(padded_output_sequences, skip_special_tokens=skip_special_tokens, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# Make a copy of list when removing otherwise `self.image_processor.model_input_names` is also modified
extra_image_inputs = [
"image_input_ids",
"image_patch_indices_per_subsequence",
"images",
"image_patch_indices_per_batch",
]
image_processor_input_names = [name for name in image_processor_input_names if name not in extra_image_inputs]
return list(tokenizer_input_names + image_processor_input_names + ["image_patches_indices"])
__all__ = ["FuyuProcessor"]
| FuyuProcessor |
python | spack__spack | lib/spack/spack/test/jobserver.py | {
"start": 4340,
"end": 8767
} | class ____:
"""Test JobServer class functionality."""
def test_creates_new_jobserver(self):
"""Should create a new FIFO-based jobserver when none exists."""
js = JobServer(4)
try:
assert js.created is True
assert js.fifo_path is not None
assert os.path.exists(js.fifo_path)
assert js.tokens_acquired == 0
assert fcntl.fcntl(js.r, fcntl.F_GETFD) != -1
assert fcntl.fcntl(js.w, fcntl.F_GETFD) != -1
finally:
js.close()
def test_attaches_to_existing_fifo(self):
"""Should attach to existing FIFO jobserver from environment."""
js1 = JobServer(4)
assert js1.fifo_path
try:
fifo_config = get_jobserver_config(f" -j4 --jobserver-auth=fifo:{js1.fifo_path}")
assert fifo_config == js1.fifo_path
result = open_existing_jobserver_fifo(js1.fifo_path)
assert result is not None
r, w = result
os.close(r)
os.close(w)
finally:
js1.close()
def test_acquire_tokens(self):
"""Should acquire tokens from jobserver."""
js = JobServer(5)
try:
assert js.acquire(2) == 2
assert js.tokens_acquired == 2
assert js.acquire(2) == 2
assert js.tokens_acquired == 4
assert js.acquire(2) == 0
assert js.tokens_acquired == 4
finally:
js.close()
def test_release_tokens(self):
"""Should release tokens back to jobserver."""
js = JobServer(5)
try:
assert js.acquire(2) == 2
assert js.tokens_acquired == 2
js.release()
assert js.tokens_acquired == 1
assert js.acquire(1) == 1
assert js.tokens_acquired == 2
finally:
js.close()
def test_release_without_tokens_is_noop(self):
"""Releasing without acquired tokens should be a no-op."""
js = JobServer(4)
try:
assert js.tokens_acquired == 0
js.release()
assert js.tokens_acquired == 0
finally:
js.close()
def test_makeflags_fifo_gmake_44(self):
"""Should return FIFO format for gmake >= 4.4."""
js = JobServer(8)
try:
flags = js.makeflags(Spec("gmake@=4.4"))
assert flags == f" -j8 --jobserver-auth=fifo:{js.fifo_path}"
finally:
js.close()
def test_makeflags_pipe_gmake_40(self):
"""Should return pipe format for gmake 4.0-4.3."""
js = JobServer(8)
try:
flags = js.makeflags(Spec("gmake@=4.0"))
assert flags == f" -j8 --jobserver-auth={js.r},{js.w}"
finally:
js.close()
def test_makeflags_old_format_gmake_3(self):
"""Should return old --jobserver-fds format for gmake < 4.0."""
js = JobServer(8)
try:
flags = js.makeflags(Spec("gmake@=3.9"))
assert flags == f" -j8 --jobserver-fds={js.r},{js.w}"
finally:
js.close()
def test_makeflags_no_gmake(self):
"""Should return FIFO format when no gmake (modern default)."""
js = JobServer(6)
try:
flags = js.makeflags(None)
assert flags == f" -j6 --jobserver-auth=fifo:{js.fifo_path}"
finally:
js.close()
def test_close_removes_created_fifo(self):
"""Should remove FIFO and directory if created by this instance."""
js = JobServer(4)
fifo_path = js.fifo_path
assert fifo_path and os.path.exists(fifo_path)
js.close()
assert not os.path.exists(os.path.dirname(fifo_path))
def test_file_descriptors_are_inheritable(self):
"""Should set file descriptors as inheritable for child processes."""
js = JobServer(4)
try:
assert os.get_inheritable(js.r)
assert os.get_inheritable(js.w)
finally:
js.close()
def test_connection_objects_exist(self):
"""Should create Connection objects for fd inheritance."""
js = JobServer(4)
try:
assert js.r_conn is not None and js.r_conn.fileno() == js.r
assert js.w_conn is not None and js.w_conn.fileno() == js.w
finally:
js.close()
| TestJobServer |
python | sanic-org__sanic | sanic/touchup/schemes/ode.py | {
"start": 1564,
"end": 2857
} | class ____(NodeTransformer):
def __init__(self, registered_events) -> None:
self._registered_events = registered_events
def visit_Expr(self, node: Expr) -> Any:
call = node.value
if isinstance(call, Await):
call = call.value
func = getattr(call, "func", None)
args = getattr(call, "args", None)
if not func or not args:
return node
if isinstance(func, Attribute) and func.attr == "dispatch":
event = args[0]
if event_name := getattr(event, "value", None):
if self._not_registered(event_name):
logger.debug(
f"Disabling event: {event_name}",
extra={"verbosity": 2},
)
return None
return node
def _not_registered(self, event_name):
dynamic = []
for event in self._registered_events:
if event.endswith(">"):
namespace_concern, _ = event.rsplit(".", 1)
dynamic.append(namespace_concern)
namespace_concern, _ = event_name.rsplit(".", 1)
return (
event_name not in self._registered_events
and namespace_concern not in dynamic
)
| RemoveDispatch |
python | pennersr__django-allauth | allauth/socialaccount/providers/draugiem/views.py | {
"start": 549,
"end": 2915
} | class ____(Exception):
pass
ACCESS_TOKEN_URL = "https://api.draugiem.lv/json" # nosec
AUTHORIZE_URL = "https://api.draugiem.lv/authorize"
def login(request):
app = get_adapter().get_app(request, DraugiemProvider.id)
redirect_url = request.build_absolute_uri(reverse(callback))
# Draugiem mandates a weak hashing algorithm.
redirect_url_hash = md5(
(app.secret + redirect_url).encode("utf-8")
).hexdigest() # nosec
params = {
"app": app.client_id,
"hash": redirect_url_hash,
"redirect": redirect_url,
}
SocialLogin.stash_state(request)
return HttpResponseRedirect("%s?%s" % (AUTHORIZE_URL, urlencode(params)))
@csrf_exempt
def callback(request):
adapter = get_adapter()
provider = adapter.get_provider(request, DraugiemProvider.id)
if "dr_auth_status" not in request.GET:
return render_authentication_error(request, provider, error=AuthError.UNKNOWN)
if request.GET["dr_auth_status"] != "ok":
return render_authentication_error(request, provider, error=AuthError.DENIED)
if "dr_auth_code" not in request.GET:
return render_authentication_error(request, provider, error=AuthError.UNKNOWN)
ret = None
auth_exception = None
try:
app = provider.app
login = draugiem_complete_login(request, app, request.GET["dr_auth_code"])
login.state = SocialLogin.unstash_state(request)
ret = complete_social_login(request, login)
except (requests.RequestException, DraugiemApiError) as e:
auth_exception = e
if not ret:
ret = render_authentication_error(request, provider, exception=auth_exception)
return ret
def draugiem_complete_login(request, app, code):
provider = get_adapter().get_provider(request, DraugiemProvider.id)
response = (
get_adapter()
.get_requests_session()
.get(
ACCESS_TOKEN_URL,
{"action": "authorize", "app": app.secret, "code": code},
)
)
response.raise_for_status()
response_json = response.json()
if "error" in response_json:
raise DraugiemApiError(response_json["error"])
token = SocialToken(app=app, token=response_json["apikey"])
login = provider.sociallogin_from_response(request, response_json)
login.token = token
return login
| DraugiemApiError |
python | django__django | tests/admin_views/models.py | {
"start": 2014,
"end": 2281
} | class ____(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name="¿Name?")
def __str__(self):
return self.name
def get_absolute_url(self):
return f"/books/{self.id}/"
| Book |
python | arrow-py__arrow | tests/test_parser.py | {
"start": 5704,
"end": 30955
} | class ____:
def test_parse_list(self, mocker):
mocker.patch(
"arrow.parser.DateTimeParser._parse_multiformat",
string="str",
formats=["fmt_a", "fmt_b"],
return_value="result",
)
result = self.parser.parse("str", ["fmt_a", "fmt_b"])
assert result == "result"
def test_parse_unrecognized_token(self, mocker):
mocker.patch.dict("arrow.parser.DateTimeParser._BASE_INPUT_RE_MAP")
del arrow.parser.DateTimeParser._BASE_INPUT_RE_MAP["YYYY"]
# need to make another local parser to apply patch changes
_parser = parser.DateTimeParser()
with pytest.raises(parser.ParserError):
_parser.parse("2013-01-01", "YYYY-MM-DD")
def test_parse_parse_no_match(self):
with pytest.raises(ParserError):
self.parser.parse("01-01", "YYYY-MM-DD")
def test_parse_separators(self):
with pytest.raises(ParserError):
self.parser.parse("1403549231", "YYYY-MM-DD")
def test_parse_numbers(self):
self.expected = datetime(2012, 1, 1, 12, 5, 10)
assert (
self.parser.parse("2012-01-01 12:05:10", "YYYY-MM-DD HH:mm:ss")
== self.expected
)
def test_parse_am(self):
with pytest.raises(ParserMatchError):
self.parser.parse("2021-01-30 14:00:00 AM", "YYYY-MM-DD HH:mm:ss A")
def test_parse_year_two_digit(self):
self.expected = datetime(1979, 1, 1, 12, 5, 10)
assert (
self.parser.parse("79-01-01 12:05:10", "YY-MM-DD HH:mm:ss") == self.expected
)
def test_parse_timestamp(self):
tz_utc = tz.tzutc()
float_timestamp = time.time()
int_timestamp = int(float_timestamp)
self.expected = datetime.fromtimestamp(int_timestamp, tz=tz_utc)
assert self.parser.parse(f"{int_timestamp:d}", "X") == self.expected
self.expected = datetime.fromtimestamp(float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{float_timestamp:f}", "X") == self.expected
# test handling of ns timestamp (arrow will round to 6 digits regardless)
self.expected = datetime.fromtimestamp(float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{float_timestamp:f}123", "X") == self.expected
# test ps timestamp (arrow will round to 6 digits regardless)
self.expected = datetime.fromtimestamp(float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{float_timestamp:f}123456", "X") == self.expected
# NOTE: timestamps cannot be parsed from natural language strings (by removing the ^...$) because it will
# break cases like "15 Jul 2000" and a format list (see issue #447)
with pytest.raises(ParserError):
natural_lang_string = "Meet me at {} at the restaurant.".format(
float_timestamp
)
self.parser.parse(natural_lang_string, "X")
with pytest.raises(ParserError):
self.parser.parse("1565982019.", "X")
with pytest.raises(ParserError):
self.parser.parse(".1565982019", "X")
# NOTE: negative timestamps cannot be handled by datetime on Windows
# Must use timedelta to handle them: https://stackoverflow.com/questions/36179914
@pytest.mark.skipif(
os.name == "nt", reason="negative timestamps are not supported on Windows"
)
def test_parse_negative_timestamp(self):
# regression test for issue #662
tz_utc = tz.tzutc()
float_timestamp = time.time()
int_timestamp = int(float_timestamp)
negative_int_timestamp = -int_timestamp
self.expected = datetime.fromtimestamp(negative_int_timestamp, tz=tz_utc)
assert self.parser.parse(f"{negative_int_timestamp:d}", "X") == self.expected
negative_float_timestamp = -float_timestamp
self.expected = datetime.fromtimestamp(negative_float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{negative_float_timestamp:f}", "X") == self.expected
def test_parse_expanded_timestamp(self):
# test expanded timestamps that include milliseconds
# and microseconds as multiples rather than decimals
# requested in issue #357
tz_utc = tz.tzutc()
timestamp = 1569982581.413132
timestamp_milli = round(timestamp * 1000)
timestamp_micro = round(timestamp * 1_000_000)
# "x" token should parse integer timestamps below MAX_TIMESTAMP normally
self.expected = datetime.fromtimestamp(int(timestamp), tz=tz_utc)
assert self.parser.parse(f"{int(timestamp):d}", "x") == self.expected
self.expected = datetime.fromtimestamp(round(timestamp, 3), tz=tz_utc)
assert self.parser.parse(f"{timestamp_milli:d}", "x") == self.expected
self.expected = datetime.fromtimestamp(timestamp, tz=tz_utc)
assert self.parser.parse(f"{timestamp_micro:d}", "x") == self.expected
# anything above max µs timestamp should fail
with pytest.raises(ValueError):
self.parser.parse(f"{int(MAX_TIMESTAMP_US) + 1:d}", "x")
# floats are not allowed with the "x" token
with pytest.raises(ParserMatchError):
self.parser.parse(f"{timestamp:f}", "x")
def test_parse_names(self):
self.expected = datetime(2012, 1, 1)
assert self.parser.parse("January 1, 2012", "MMMM D, YYYY") == self.expected
assert self.parser.parse("Jan 1, 2012", "MMM D, YYYY") == self.expected
def test_parse_pm(self):
self.expected = datetime(1, 1, 1, 13, 0, 0)
assert self.parser.parse("1 pm", "H a") == self.expected
assert self.parser.parse("1 pm", "h a") == self.expected
self.expected = datetime(1, 1, 1, 1, 0, 0)
assert self.parser.parse("1 am", "H A") == self.expected
assert self.parser.parse("1 am", "h A") == self.expected
self.expected = datetime(1, 1, 1, 0, 0, 0)
assert self.parser.parse("12 am", "H A") == self.expected
assert self.parser.parse("12 am", "h A") == self.expected
self.expected = datetime(1, 1, 1, 12, 0, 0)
assert self.parser.parse("12 pm", "H A") == self.expected
assert self.parser.parse("12 pm", "h A") == self.expected
def test_parse_tz_hours_only(self):
self.expected = datetime(2025, 10, 17, 5, 30, 10, tzinfo=tz.tzoffset(None, 0))
parsed = self.parser.parse("2025-10-17 05:30:10+00", "YYYY-MM-DD HH:mm:ssZ")
assert parsed == self.expected
def test_parse_tz_zz(self):
self.expected = datetime(2013, 1, 1, tzinfo=tz.tzoffset(None, -7 * 3600))
assert self.parser.parse("2013-01-01 -07:00", "YYYY-MM-DD ZZ") == self.expected
@pytest.mark.parametrize("full_tz_name", make_full_tz_list())
def test_parse_tz_name_zzz(self, full_tz_name):
self.expected = datetime(2013, 1, 1, tzinfo=ZoneInfo(full_tz_name))
assert (
self.parser.parse(f"2013-01-01 {full_tz_name}", "YYYY-MM-DD ZZZ")
== self.expected
)
# note that offsets are not timezones
with pytest.raises(ParserError):
self.parser.parse("2013-01-01 12:30:45.9+1000", "YYYY-MM-DDZZZ")
with pytest.raises(ParserError):
self.parser.parse("2013-01-01 12:30:45.9+10:00", "YYYY-MM-DDZZZ")
with pytest.raises(ParserError):
self.parser.parse("2013-01-01 12:30:45.9-10", "YYYY-MM-DDZZZ")
def test_parse_subsecond(self):
self.expected = datetime(2013, 1, 1, 12, 30, 45, 900000)
assert (
self.parser.parse("2013-01-01 12:30:45.9", "YYYY-MM-DD HH:mm:ss.S")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 980000)
assert (
self.parser.parse("2013-01-01 12:30:45.98", "YYYY-MM-DD HH:mm:ss.SS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987000)
assert (
self.parser.parse("2013-01-01 12:30:45.987", "YYYY-MM-DD HH:mm:ss.SSS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987600)
assert (
self.parser.parse("2013-01-01 12:30:45.9876", "YYYY-MM-DD HH:mm:ss.SSSS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987650)
assert (
self.parser.parse("2013-01-01 12:30:45.98765", "YYYY-MM-DD HH:mm:ss.SSSSS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
assert (
self.parser.parse(
"2013-01-01 12:30:45.987654", "YYYY-MM-DD HH:mm:ss.SSSSSS"
)
== self.expected
)
def test_parse_subsecond_rounding(self):
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
datetime_format = "YYYY-MM-DD HH:mm:ss.S"
# round up
string = "2013-01-01 12:30:45.9876539"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# round down
string = "2013-01-01 12:30:45.98765432"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# round half-up
string = "2013-01-01 12:30:45.987653521"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# round half-down
string = "2013-01-01 12:30:45.9876545210"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# overflow (zero out the subseconds and increment the seconds)
# regression tests for issue #636
def test_parse_subsecond_rounding_overflow(self):
datetime_format = "YYYY-MM-DD HH:mm:ss.S"
self.expected = datetime(2013, 1, 1, 12, 30, 46)
string = "2013-01-01 12:30:45.9999995"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
self.expected = datetime(2013, 1, 1, 12, 31, 0)
string = "2013-01-01 12:30:59.9999999"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
self.expected = datetime(2013, 1, 2, 0, 0, 0)
string = "2013-01-01 23:59:59.9999999"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# 6 digits should remain unrounded
self.expected = datetime(2013, 1, 1, 12, 30, 45, 999999)
string = "2013-01-01 12:30:45.999999"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# Regression tests for issue #560
def test_parse_long_year(self):
with pytest.raises(ParserError):
self.parser.parse("09 January 123456789101112", "DD MMMM YYYY")
with pytest.raises(ParserError):
self.parser.parse("123456789101112 09 January", "YYYY DD MMMM")
with pytest.raises(ParserError):
self.parser.parse("68096653015/01/19", "YY/M/DD")
def test_parse_with_extra_words_at_start_and_end_invalid(self):
input_format_pairs = [
("blah2016", "YYYY"),
("blah2016blah", "YYYY"),
("2016blah", "YYYY"),
("2016-05blah", "YYYY-MM"),
("2016-05-16blah", "YYYY-MM-DD"),
("2016-05-16T04:05:06.789120blah", "YYYY-MM-DDThh:mm:ss.S"),
("2016-05-16T04:05:06.789120ZblahZ", "YYYY-MM-DDThh:mm:ss.SZ"),
("2016-05-16T04:05:06.789120Zblah", "YYYY-MM-DDThh:mm:ss.SZ"),
("2016-05-16T04:05:06.789120blahZ", "YYYY-MM-DDThh:mm:ss.SZ"),
]
for pair in input_format_pairs:
with pytest.raises(ParserError):
self.parser.parse(pair[0], pair[1])
def test_parse_with_extra_words_at_start_and_end_valid(self):
# Spaces surrounding the parsable date are ok because we
# allow the parsing of natural language input. Additionally, a single
# character of specific punctuation before or after the date is okay.
# See docs for full list of valid punctuation.
assert self.parser.parse("blah 2016 blah", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse("blah 2016", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse("2016 blah", "YYYY") == datetime(2016, 1, 1)
# test one additional space along with space divider
assert self.parser.parse(
"blah 2016-05-16 04:05:06.789120", "YYYY-MM-DD hh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
"2016-05-16 04:05:06.789120 blah", "YYYY-MM-DD hh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
# test one additional space along with T divider
assert self.parser.parse(
"blah 2016-05-16T04:05:06.789120", "YYYY-MM-DDThh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
"2016-05-16T04:05:06.789120 blah", "YYYY-MM-DDThh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
"Meet me at 2016-05-16T04:05:06.789120 at the restaurant.",
"YYYY-MM-DDThh:mm:ss.S",
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
"Meet me at 2016-05-16 04:05:06.789120 at the restaurant.",
"YYYY-MM-DD hh:mm:ss.S",
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
# regression test for issue #701
# tests cases of a partial match surrounded by punctuation
# for the list of valid punctuation, see documentation
def test_parse_with_punctuation_fences(self):
assert self.parser.parse(
"Meet me at my house on Halloween (2019-31-10)", "YYYY-DD-MM"
) == datetime(2019, 10, 31)
assert self.parser.parse(
"Monday, 9. September 2019, 16:15-20:00", "dddd, D. MMMM YYYY"
) == datetime(2019, 9, 9)
assert self.parser.parse("A date is 11.11.2011.", "DD.MM.YYYY") == datetime(
2011, 11, 11
)
with pytest.raises(ParserMatchError):
self.parser.parse("11.11.2011.1 is not a valid date.", "DD.MM.YYYY")
with pytest.raises(ParserMatchError):
self.parser.parse(
"This date has too many punctuation marks following it (11.11.2011).",
"DD.MM.YYYY",
)
def test_parse_with_leading_and_trailing_whitespace(self):
assert self.parser.parse(" 2016", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse("2016 ", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse(" 2016 ", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse(
" 2016-05-16 04:05:06.789120 ", "YYYY-MM-DD hh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
" 2016-05-16T04:05:06.789120 ", "YYYY-MM-DDThh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
def test_parse_YYYY_DDDD(self):
assert self.parser.parse("1998-136", "YYYY-DDDD") == datetime(1998, 5, 16)
assert self.parser.parse("1998-006", "YYYY-DDDD") == datetime(1998, 1, 6)
with pytest.raises(ParserError):
self.parser.parse("1998-456", "YYYY-DDDD")
def test_parse_YYYY_DDD(self):
assert self.parser.parse("1998-6", "YYYY-DDD") == datetime(1998, 1, 6)
assert self.parser.parse("1998-136", "YYYY-DDD") == datetime(1998, 5, 16)
with pytest.raises(ParserError):
self.parser.parse("1998-756", "YYYY-DDD")
# month cannot be passed with DDD and DDDD tokens
def test_parse_YYYY_MM_DDDD(self):
with pytest.raises(ParserError):
self.parser.parse("2015-01-009", "YYYY-MM-DDDD")
# year is required with the DDD and DDDD tokens
def test_parse_DDD_only(self):
with pytest.raises(ParserError):
self.parser.parse("5", "DDD")
def test_parse_DDDD_only(self):
with pytest.raises(ParserError):
self.parser.parse("145", "DDDD")
def test_parse_ddd_and_dddd(self):
fr_parser = parser.DateTimeParser("fr")
# Day of week should be ignored when a day is passed
# 2019-10-17 is a Thursday, so we know day of week
# is ignored if the same date is outputted
expected = datetime(2019, 10, 17)
assert self.parser.parse("Tue 2019-10-17", "ddd YYYY-MM-DD") == expected
assert fr_parser.parse("mar 2019-10-17", "ddd YYYY-MM-DD") == expected
assert self.parser.parse("Tuesday 2019-10-17", "dddd YYYY-MM-DD") == expected
assert fr_parser.parse("mardi 2019-10-17", "dddd YYYY-MM-DD") == expected
# Get first Tuesday after epoch
expected = datetime(1970, 1, 6)
assert self.parser.parse("Tue", "ddd") == expected
assert fr_parser.parse("mar", "ddd") == expected
assert self.parser.parse("Tuesday", "dddd") == expected
assert fr_parser.parse("mardi", "dddd") == expected
# Get first Tuesday in 2020
expected = datetime(2020, 1, 7)
assert self.parser.parse("Tue 2020", "ddd YYYY") == expected
assert fr_parser.parse("mar 2020", "ddd YYYY") == expected
assert self.parser.parse("Tuesday 2020", "dddd YYYY") == expected
assert fr_parser.parse("mardi 2020", "dddd YYYY") == expected
# Get first Tuesday in February 2020
expected = datetime(2020, 2, 4)
assert self.parser.parse("Tue 02 2020", "ddd MM YYYY") == expected
assert fr_parser.parse("mar 02 2020", "ddd MM YYYY") == expected
assert self.parser.parse("Tuesday 02 2020", "dddd MM YYYY") == expected
assert fr_parser.parse("mardi 02 2020", "dddd MM YYYY") == expected
# Get first Tuesday in February after epoch
expected = datetime(1970, 2, 3)
assert self.parser.parse("Tue 02", "ddd MM") == expected
assert fr_parser.parse("mar 02", "ddd MM") == expected
assert self.parser.parse("Tuesday 02", "dddd MM") == expected
assert fr_parser.parse("mardi 02", "dddd MM") == expected
# Times remain intact
expected = datetime(2020, 2, 4, 10, 25, 54, 123456, tz.tzoffset(None, -3600))
assert (
self.parser.parse(
"Tue 02 2020 10:25:54.123456-01:00", "ddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
assert (
fr_parser.parse(
"mar 02 2020 10:25:54.123456-01:00", "ddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
assert (
self.parser.parse(
"Tuesday 02 2020 10:25:54.123456-01:00", "dddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
assert (
fr_parser.parse(
"mardi 02 2020 10:25:54.123456-01:00", "dddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
def test_parse_ddd_and_dddd_ignore_case(self):
# Regression test for issue #851
expected = datetime(2019, 6, 24)
assert (
self.parser.parse("MONDAY, June 24, 2019", "dddd, MMMM DD, YYYY")
== expected
)
def test_parse_ddd_and_dddd_then_format(self):
# Regression test for issue #446
arw_formatter = formatter.DateTimeFormatter()
assert arw_formatter.format(self.parser.parse("Mon", "ddd"), "ddd") == "Mon"
assert (
arw_formatter.format(self.parser.parse("Monday", "dddd"), "dddd")
== "Monday"
)
assert arw_formatter.format(self.parser.parse("Tue", "ddd"), "ddd") == "Tue"
assert (
arw_formatter.format(self.parser.parse("Tuesday", "dddd"), "dddd")
== "Tuesday"
)
assert arw_formatter.format(self.parser.parse("Wed", "ddd"), "ddd") == "Wed"
assert (
arw_formatter.format(self.parser.parse("Wednesday", "dddd"), "dddd")
== "Wednesday"
)
assert arw_formatter.format(self.parser.parse("Thu", "ddd"), "ddd") == "Thu"
assert (
arw_formatter.format(self.parser.parse("Thursday", "dddd"), "dddd")
== "Thursday"
)
assert arw_formatter.format(self.parser.parse("Fri", "ddd"), "ddd") == "Fri"
assert (
arw_formatter.format(self.parser.parse("Friday", "dddd"), "dddd")
== "Friday"
)
assert arw_formatter.format(self.parser.parse("Sat", "ddd"), "ddd") == "Sat"
assert (
arw_formatter.format(self.parser.parse("Saturday", "dddd"), "dddd")
== "Saturday"
)
assert arw_formatter.format(self.parser.parse("Sun", "ddd"), "ddd") == "Sun"
assert (
arw_formatter.format(self.parser.parse("Sunday", "dddd"), "dddd")
== "Sunday"
)
def test_parse_HH_24(self):
assert self.parser.parse(
"2019-10-30T24:00:00", "YYYY-MM-DDTHH:mm:ss"
) == datetime(2019, 10, 31, 0, 0, 0, 0)
assert self.parser.parse("2019-10-30T24:00", "YYYY-MM-DDTHH:mm") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse("2019-10-30T24", "YYYY-MM-DDTHH") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse(
"2019-10-30T24:00:00.0", "YYYY-MM-DDTHH:mm:ss.S"
) == datetime(2019, 10, 31, 0, 0, 0, 0)
assert self.parser.parse(
"2019-10-31T24:00:00", "YYYY-MM-DDTHH:mm:ss"
) == datetime(2019, 11, 1, 0, 0, 0, 0)
assert self.parser.parse(
"2019-12-31T24:00:00", "YYYY-MM-DDTHH:mm:ss"
) == datetime(2020, 1, 1, 0, 0, 0, 0)
assert self.parser.parse(
"2019-12-31T23:59:59.9999999", "YYYY-MM-DDTHH:mm:ss.S"
) == datetime(2020, 1, 1, 0, 0, 0, 0)
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:01:00", "YYYY-MM-DDTHH:mm:ss")
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:00:01", "YYYY-MM-DDTHH:mm:ss")
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:00:00.1", "YYYY-MM-DDTHH:mm:ss.S")
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:00:00.999999", "YYYY-MM-DDTHH:mm:ss.S")
def test_parse_W(self):
assert self.parser.parse("2011-W05-4", "W") == datetime(2011, 2, 3)
assert self.parser.parse("2011W054", "W") == datetime(2011, 2, 3)
assert self.parser.parse("2011-W05", "W") == datetime(2011, 1, 31)
assert self.parser.parse("2011W05", "W") == datetime(2011, 1, 31)
assert self.parser.parse("2011-W05-4T14:17:01", "WTHH:mm:ss") == datetime(
2011, 2, 3, 14, 17, 1
)
assert self.parser.parse("2011W054T14:17:01", "WTHH:mm:ss") == datetime(
2011, 2, 3, 14, 17, 1
)
assert self.parser.parse("2011-W05T14:17:01", "WTHH:mm:ss") == datetime(
2011, 1, 31, 14, 17, 1
)
assert self.parser.parse("2011W05T141701", "WTHHmmss") == datetime(
2011, 1, 31, 14, 17, 1
)
assert self.parser.parse("2011W054T141701", "WTHHmmss") == datetime(
2011, 2, 3, 14, 17, 1
)
bad_formats = [
"201W22",
"1995-W1-4",
"2001-W34-90",
"2001--W34",
"2011-W03--3",
"thstrdjtrsrd676776r65",
"2002-W66-1T14:17:01",
"2002-W23-03T14:17:01",
]
for fmt in bad_formats:
with pytest.raises(ParserError):
self.parser.parse(fmt, "W")
def test_parse_normalize_whitespace(self):
assert self.parser.parse(
"Jun 1 2005 1:33PM", "MMM D YYYY H:mmA", normalize_whitespace=True
) == datetime(2005, 6, 1, 13, 33)
with pytest.raises(ParserError):
self.parser.parse("Jun 1 2005 1:33PM", "MMM D YYYY H:mmA")
assert self.parser.parse(
"\t 2013-05-05 T \n 12:30:45\t123456 \t \n",
"YYYY-MM-DD T HH:mm:ss S",
normalize_whitespace=True,
) == datetime(2013, 5, 5, 12, 30, 45, 123456)
with pytest.raises(ParserError):
self.parser.parse(
"\t 2013-05-05 T \n 12:30:45\t123456 \t \n",
"YYYY-MM-DD T HH:mm:ss S",
)
assert self.parser.parse(
" \n Jun 1\t 2005\n ", "MMM D YYYY", normalize_whitespace=True
) == datetime(2005, 6, 1)
with pytest.raises(ParserError):
self.parser.parse(" \n Jun 1\t 2005\n ", "MMM D YYYY")
@pytest.mark.usefixtures("dt_parser_regex")
| TestDateTimeParserParse |
python | kamyu104__LeetCode-Solutions | Python/sum-of-number-and-its-reverse.py | {
"start": 1496,
"end": 1709
} | class ____(object):
def sumOfNumberAndReverse(self, num):
"""
:type num: int
:rtype: bool
"""
return any(x+int(str(x)[::-1]) == num for x in xrange(num//2, num+1))
| Solution3 |
python | django__django | tests/m2m_through_regress/models.py | {
"start": 2480,
"end": 2574
} | class ____(IndividualCompetitor):
class Meta:
proxy = True
| ProxiedIndividualCompetitor |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/components/reward_providers/curiosity_reward_provider.py | {
"start": 988,
"end": 2543
} | class ____(BaseRewardProvider):
beta = 0.2 # Forward vs Inverse loss weight
loss_multiplier = 10.0 # Loss multiplier
def __init__(self, specs: BehaviorSpec, settings: CuriositySettings) -> None:
super().__init__(specs, settings)
self._ignore_done = True
self._network = CuriosityNetwork(specs, settings)
self._network.to(default_device())
self.optimizer = torch.optim.Adam(
self._network.parameters(), lr=settings.learning_rate
)
self._has_updated_once = False
def evaluate(self, mini_batch: AgentBuffer) -> np.ndarray:
with torch.no_grad():
rewards = ModelUtils.to_numpy(self._network.compute_reward(mini_batch))
rewards = np.minimum(rewards, 1.0 / self.strength)
return rewards * self._has_updated_once
def update(self, mini_batch: AgentBuffer) -> Dict[str, np.ndarray]:
self._has_updated_once = True
forward_loss = self._network.compute_forward_loss(mini_batch)
inverse_loss = self._network.compute_inverse_loss(mini_batch)
loss = self.loss_multiplier * (
self.beta * forward_loss + (1.0 - self.beta) * inverse_loss
)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return {
"Losses/Curiosity Forward Loss": forward_loss.item(),
"Losses/Curiosity Inverse Loss": inverse_loss.item(),
}
def get_modules(self):
return {f"Module:{self.name}": self._network}
| CuriosityRewardProvider |
python | fluentpython__example-code-2e | 15-more-types/protocol/mymax/mymax.py | {
"start": 128,
"end": 1705
} | class ____(Protocol):
def __lt__(self, other: Any) -> bool: ...
T = TypeVar('T')
LT = TypeVar('LT', bound=SupportsLessThan)
DT = TypeVar('DT')
MISSING = object()
EMPTY_MSG = 'max() arg is an empty sequence'
@overload
def max(__arg1: LT, __arg2: LT, *args: LT, key: None = ...) -> LT:
...
@overload
def max(__arg1: T, __arg2: T, *args: T, key: Callable[[T], LT]) -> T:
...
@overload
def max(__iterable: Iterable[LT], *, key: None = ...) -> LT:
...
@overload
def max(__iterable: Iterable[T], *, key: Callable[[T], LT]) -> T:
...
@overload
def max(__iterable: Iterable[LT], *, key: None = ...,
default: DT) -> Union[LT, DT]:
...
@overload
def max(__iterable: Iterable[T], *, key: Callable[[T], LT],
default: DT) -> Union[T, DT]:
...
# end::MYMAX_TYPES[]
# tag::MYMAX[]
def max(first, *args, key=None, default=MISSING):
if args:
series = args
candidate = first
else:
series = iter(first)
try:
candidate = next(series)
except StopIteration:
if default is not MISSING:
return default
raise ValueError(EMPTY_MSG) from None
if key is None:
for current in series:
if candidate < current:
candidate = current
else:
candidate_key = key(candidate)
for current in series:
current_key = key(current)
if candidate_key < current_key:
candidate = current
candidate_key = current_key
return candidate
# end::MYMAX[]
| SupportsLessThan |
python | django__django | tests/migrations/test_migrations_plan/0004_fourth.py | {
"start": 35,
"end": 225
} | class ____(migrations.Migration):
dependencies = [
("migrations", "0003_third"),
]
operations = [migrations.RunSQL("SELECT * FROM migrations_author WHERE id = 1")]
| Migration |
python | fluentpython__example-code-2e | 15-more-types/cafeteria/cafeteria.py | {
"start": 38,
"end": 80
} | class ____:
"""Any beverage."""
| Beverage |
python | lepture__authlib | authlib/oauth2/rfc6749/grants/refresh_token.py | {
"start": 603,
"end": 6569
} | class ____(BaseGrant, TokenEndpointMixin):
"""A special grant endpoint for refresh_token grant_type. Refreshing an
Access Token per `Section 6`_.
.. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
"""
GRANT_TYPE = "refresh_token"
#: The authorization server MAY issue a new refresh token
INCLUDE_NEW_REFRESH_TOKEN = False
def _validate_request_client(self):
# require client authentication for confidential clients or for any
# client that was issued client credentials (or with other
# authentication requirements)
client = self.authenticate_token_endpoint_client()
log.debug("Validate token request of %r", client)
if not client.check_grant_type(self.GRANT_TYPE):
raise UnauthorizedClientError(
f"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'"
)
return client
def _validate_request_token(self, client):
refresh_token = self.request.form.get("refresh_token")
if refresh_token is None:
raise InvalidRequestError("Missing 'refresh_token' in request.")
token = self.authenticate_refresh_token(refresh_token)
if not token or not token.check_client(client):
raise InvalidGrantError()
return token
def _validate_token_scope(self, token):
scope = self.request.payload.scope
if not scope:
return
original_scope = token.get_scope()
if not original_scope:
raise InvalidScopeError()
original_scope = set(scope_to_list(original_scope))
if not original_scope.issuperset(set(scope_to_list(scope))):
raise InvalidScopeError()
def validate_token_request(self):
"""If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per Appendix B with a character encoding of UTF-8 in the HTTP
request entity-body, per Section 6:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
For example, the client makes the following HTTP request using
transport-layer security (with extra line breaks for display purposes
only):
.. code-block:: http
POST /token HTTP/1.1
Host: server.example.com
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
Content-Type: application/x-www-form-urlencoded
grant_type=refresh_token&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA
"""
client = self._validate_request_client()
self.request.client = client
refresh_token = self._validate_request_token(client)
self._validate_token_scope(refresh_token)
self.request.refresh_token = refresh_token
@hooked
def create_token_response(self):
"""If valid and authorized, the authorization server issues an access
token as described in Section 5.1. If the request failed
verification or is invalid, the authorization server returns an error
response as described in Section 5.2.
"""
refresh_token = self.request.refresh_token
user = self.authenticate_user(refresh_token)
if not user:
raise InvalidRequestError("There is no 'user' for this token.")
client = self.request.client
token = self.issue_token(user, refresh_token)
log.debug("Issue token %r to %r", token, client)
self.request.user = user
self.save_token(token)
self.revoke_old_credential(refresh_token)
return 200, token, self.TOKEN_RESPONSE_HEADER
def issue_token(self, user, refresh_token):
scope = self.request.payload.scope
if not scope:
scope = refresh_token.get_scope()
token = self.generate_token(
user=user,
scope=scope,
include_refresh_token=self.INCLUDE_NEW_REFRESH_TOKEN,
)
return token
def authenticate_refresh_token(self, refresh_token):
"""Get token information with refresh_token string. Developers MUST
implement this method in subclass::
def authenticate_refresh_token(self, refresh_token):
token = Token.get(refresh_token=refresh_token)
if token and not token.refresh_token_revoked:
return token
:param refresh_token: The refresh token issued to the client
:return: token
"""
raise NotImplementedError()
def authenticate_user(self, refresh_token):
"""Authenticate the user related to this credential. Developers MUST
implement this method in subclass::
def authenticate_user(self, credential):
return User.get(credential.user_id)
:param refresh_token: Token object
:return: user
"""
raise NotImplementedError()
def revoke_old_credential(self, refresh_token):
"""The authorization server MAY revoke the old refresh token after
issuing a new refresh token to the client. Developers MUST implement
this method in subclass::
def revoke_old_credential(self, refresh_token):
credential.revoked = True
credential.save()
:param refresh_token: Token object
"""
raise NotImplementedError()
| RefreshTokenGrant |
python | django__django | django/contrib/admin/exceptions.py | {
"start": 335,
"end": 426
} | class ____(Exception):
"""The model is already registered."""
pass
| AlreadyRegistered |
python | zarr-developers__zarr-python | src/zarr/codecs/sharding.py | {
"start": 2024,
"end": 2594
} | class ____(ByteGetter):
shard_dict: ShardMapping
chunk_coords: tuple[int, ...]
async def get(
self, prototype: BufferPrototype, byte_range: ByteRequest | None = None
) -> Buffer | None:
assert byte_range is None, "byte_range is not supported within shards"
assert prototype == default_buffer_prototype(), (
f"prototype is not supported within shards currently. diff: {prototype} != {default_buffer_prototype()}"
)
return self.shard_dict.get(self.chunk_coords)
@dataclass(frozen=True)
| _ShardingByteGetter |
python | ray-project__ray | python/ray/_private/state_api_test_utils.py | {
"start": 874,
"end": 953
} | class ____:
latency_sec: float
result_size: int
@dataclass
| StateAPIMetric |
python | doocs__leetcode | solution/2900-2999/2926.Maximum Balanced Subsequence Sum/Solution.py | {
"start": 394,
"end": 780
} | class ____:
def maxBalancedSubsequenceSum(self, nums: List[int]) -> int:
arr = [x - i for i, x in enumerate(nums)]
s = sorted(set(arr))
tree = BinaryIndexedTree(len(s))
for i, x in enumerate(nums):
j = bisect_left(s, x - i) + 1
v = max(tree.query(j), 0) + x
tree.update(j, v)
return tree.query(len(s))
| Solution |
python | pallets__click | src/click/parser.py | {
"start": 4007,
"end": 5800
} | class ____:
def __init__(
self,
obj: CoreOption,
opts: cabc.Sequence[str],
dest: str | None,
action: str | None = None,
nargs: int = 1,
const: t.Any | None = None,
):
self._short_opts = []
self._long_opts = []
self.prefixes: set[str] = set()
for opt in opts:
prefix, value = _split_opt(opt)
if not prefix:
raise ValueError(f"Invalid start character for option ({opt})")
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = "store"
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self) -> bool:
return self.action in ("store", "append")
def process(self, value: t.Any, state: _ParsingState) -> None:
if self.action == "store":
state.opts[self.dest] = value # type: ignore
elif self.action == "store_const":
state.opts[self.dest] = self.const # type: ignore
elif self.action == "append":
state.opts.setdefault(self.dest, []).append(value) # type: ignore
elif self.action == "append_const":
state.opts.setdefault(self.dest, []).append(self.const) # type: ignore
elif self.action == "count":
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore
else:
raise ValueError(f"unknown action '{self.action}'")
state.order.append(self.obj)
| _Option |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_invite_index.py | {
"start": 8379,
"end": 14055
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-member-invite-index"
method = "post"
def setUp(self) -> None:
self.login_as(self.user)
def test_forbid_qq(self) -> None:
data = {"email": "1234@qq.com", "orgRole": "member", "teams": [self.team.slug]}
response = self.get_error_response(self.organization.slug, **data, status_code=400)
assert response.data["email"][0] == "Enter a valid email address."
@patch.object(OrganizationMemberInvite, "send_invite_email")
def test_simple(self, mock_send_invite_email: MagicMock) -> None:
data = {"email": "mifu@email.com", "orgRole": "member", "teams": [self.team.slug]}
response = self.get_success_response(self.organization.slug, **data)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.email == "mifu@email.com"
assert omi.role == "member"
assert omi.organization_member_team_data == [
{"id": self.team.id, "role": None, "slug": self.team.slug}
]
assert omi.inviter_id == self.user.id
mock_send_invite_email.assert_called_once()
def test_no_teams(self) -> None:
data = {"email": "mifu@email.com", "orgRole": "member", "teams": []}
response = self.get_success_response(self.organization.slug, **data)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.email == "mifu@email.com"
assert omi.role == "member"
assert omi.organization_member_team_data == []
assert omi.inviter_id == self.user.id
@patch.object(OrganizationMemberInvite, "send_invite_email")
def test_referrer_param(self, mock_send_invite_email: MagicMock) -> None:
data = {"email": "mifu@email.com", "orgRole": "member", "teams": [self.team.slug]}
response = self.get_success_response(
self.organization.slug, **data, qs_params={"referrer": "test_referrer"}
)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.email == "mifu@email.com"
assert omi.role == "member"
assert omi.organization_member_team_data == [
{"id": self.team.id, "role": None, "slug": self.team.slug}
]
assert omi.inviter_id == self.user.id
mock_send_invite_email.assert_called_with("test_referrer")
@patch.object(OrganizationMemberInvite, "send_invite_email")
def test_internal_integration_token_can_only_invite_member_role(
self, mock_send_invite_email: MagicMock
) -> None:
internal_integration = self.create_internal_integration(
name="Internal App", organization=self.organization, scopes=["member:write"]
)
token = self.create_internal_integration_token(
user=self.user, internal_integration=internal_integration
)
err_message = (
"Integration tokens are restricted to inviting new members with the member role only."
)
data = {"email": "cat@meow.com", "orgRole": "owner", "teams": [self.team.slug]}
response = self.get_error_response(
self.organization.slug,
**data,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=400,
)
assert response.data["orgRole"][0] == err_message
data = {"email": "dog@woof.com", "orgRole": "manager", "teams": [self.team.slug]}
response = self.get_error_response(
self.organization.slug,
**data,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=400,
)
assert response.data["orgRole"][0] == err_message
data = {"email": "mifu@email.com", "orgRole": "member", "teams": [self.team.slug]}
response = self.get_success_response(
self.organization.slug,
**data,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=201,
)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.email == "mifu@email.com"
assert omi.role == "member"
assert omi.organization_member_team_data == [
{"id": self.team.id, "slug": self.team.slug, "role": None}
]
mock_send_invite_email.assert_called_once()
@patch("sentry.ratelimits.for_organization_member_invite")
def test_rate_limited(self, mock_rate_limit: MagicMock) -> None:
mock_rate_limit.return_value = True
data = {"email": "mifu@email.com", "orgRole": "member"}
self.get_error_response(self.organization.slug, **data, status_code=429)
assert not OrganizationMemberInvite.objects.filter(email="mifu@email.com").exists()
@patch(
"sentry.roles.organization_roles.get",
wraps=mock_organization_roles_get_factory(organization_roles.get),
)
def test_cannot_add_to_team_when_team_roles_disabled(self, mock_get: MagicMock) -> None:
owner_user = self.create_user("owner@localhost")
self.owner = self.create_member(
user=owner_user, organization=self.organization, role="owner"
)
self.login_as(user=owner_user)
data = {
"email": "mifu@email.com",
"orgRole": "member",
"teams": [self.team.slug],
}
response = self.get_error_response(self.organization.slug, **data, status_code=400)
assert (
response.data["teams"][0]
== "The user with a 'member' role cannot have team-level permissions."
)
| OrganizationMemberInvitePostTest |
python | apache__avro | lang/py/avro/test/sample_http_server.py | {
"start": 1570,
"end": 2054
} | class ____(avro.ipc.Responder):
def __init__(self) -> None:
super().__init__(MAIL_PROTOCOL)
def invoke(self, message: avro.protocol.Message, request: Mapping[str, Mapping[str, str]]) -> str:
if message.name == "send":
return f"Sent message to {request['message']['to']} from {request['message']['from']} with body {request['message']['body']}"
if message.name == "replay":
return "replay"
raise RuntimeError
| MailResponder |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 64251,
"end": 64744
} | class ____(PlotActionTool):
''' *toolbar icon*: |redo_icon|
Redo tool reverses the last action performed by undo tool.
.. |redo_icon| image:: /_images/icons/redo.svg
:height: 24px
:alt: Icon of an arrow on a circular arc pointing to the right representing the redo tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
| RedoTool |
python | doocs__leetcode | lcof/面试题13. 机器人的运动范围/Solution.py | {
"start": 0,
"end": 385
} | class ____:
def movingCount(self, m: int, n: int, k: int) -> int:
def f(x: int) -> int:
return x // 10 + x % 10
def dfs(i, j):
if i >= m or j >= n or f(i) + f(j) > k or (i, j) in vis:
return 0
vis.add((i, j))
return 1 + dfs(i + 1, j) + dfs(i, j + 1)
vis = set()
return dfs(0, 0)
| Solution |
python | PyCQA__pydocstyle | src/tests/test_cases/canonical_google_examples.py | {
"start": 3371,
"end": 4176
} | class ____:
"""Summary of class here.
Longer class information....
Longer class information....
Attributes:
likes_spam: A boolean indicating if we like SPAM or not.
eggs: An integer count of the eggs we have laid.
"""
@expect("D401: First line should be in imperative mood "
"(perhaps 'Init', not 'Inits')", arg_count=2)
def __init__(self, likes_spam=False):
"""Inits SampleClass with blah."""
if self: # added to avoid NameError when run via @expect decorator
self.likes_spam = likes_spam
self.eggs = 0
@expect("D401: First line should be in imperative mood "
"(perhaps 'Perform', not 'Performs')", arg_count=1)
def public_method(self):
"""Performs operation blah."""
| SampleClass |
python | squidfunk__mkdocs-material | material/plugins/privacy/parser.py | {
"start": 1682,
"end": 1955
} | class ____(HTMLParser):
# Initialize parser
def __init__(self):
super().__init__(convert_charrefs = True)
self.result = None
# Create element
def handle_starttag(self, tag, attrs):
self.result = Element(tag, dict(attrs))
| FragmentParser |
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 3399,
"end": 3533
} | class ____(OrganizationEndpoint):
def get(self, request, organization):
return Response({"ok": True})
| MyOrganizationEndpoint |
python | catalyst-team__catalyst | examples/self_supervised/src/runner.py | {
"start": 8956,
"end": 15615
} | class ____(ISelfSupervisedRunner, Runner):
"""Runner for experiments with contrastive model.
Args:
input_key: key in ``runner.batch`` dict mapping for model input
target_key: key in ``runner.batch`` dict mapping for target
loss_key: key for ``runner.batch_metrics`` to store criterion loss output
augemention_prefix: key for ``runner.batch`` to sample augumentions
projection_prefix: key for ``runner.batch`` to store model projection
embedding_prefix: key for `runner.batch`` to store model embeddings
loss_mode_prefix: selector key for loss calculation
Examples:
.. code-block:: python
# 1. loader and transforms
transforms = Compose(
[
ToTensor(),
Normalize((0.1307,), (0.3081,)),
torchvision.transforms.RandomCrop((28, 28)),
torchvision.transforms.RandomVerticalFlip(),
torchvision.transforms.RandomHorizontalFlip(),
]
)
mnist = MNIST("./logdir", train=True, download=True, transform=None)
contrastive_mnist = ContrastiveDataset(mnist, transforms=transforms)
train_loader = torch.utils.data.DataLoader(
contrastive_mnist,
batch_size=BATCH_SIZE
)
# 2. model and optimizer
encoder = MnistSimpleNet(out_features=16)
projection_head = nn.Sequential(
nn.Linear(16, 16, bias=False),
nn.ReLU(inplace=True),
nn.Linear(16, 16, bias=True)
)
class ContrastiveModel(torch.nn.Module):
def __init__(self, model, encoder):
super(ContrastiveModel, self).__init__()
self.model = model
self.encoder = encoder
def forward(self, x):
emb = self.encoder(x)
projection = self.model(emb)
return emb, projection
model = ContrastiveModel(model=projection_head, encoder=encoder)
optimizer = Adam(model.parameters(), lr=LR)
# 3. criterion with triplets sampling
criterion = NTXentLoss(tau=0.1)
callbacks = [
dl.ControlFlowCallback(
dl.CriterionCallback(
input_key="projection_left",
target_key="projection_right",
metric_key="loss"
),
loaders="train",
),
dl.SklearnModelCallback(
feature_key="embedding_left",
target_key="target",
train_loader="train",
valid_loaders="valid",
model_fn=RandomForestClassifier,
predict_method="predict_proba",
predict_key="sklearn_predict",
random_state=RANDOM_STATE,
n_estimators=10,
),
dl.ControlFlowCallback(
dl.AccuracyCallback(
target_key="target", input_key="sklearn_predict", topk=(1, 3)
),
loaders="valid",
),
]
runner = dl.ContrastiveRunner()
logdir = "./logdir"
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
callbacks=callbacks,
loaders={"train": train_loader, "valid": train_loader},
verbose=True,
logdir=logdir,
valid_loader="train",
valid_metric="loss",
minimize_valid_metric=True,
num_epochs=10,
)
"""
def __init__(
self,
model: RunnerModel = None,
engine: Engine = None,
input_key: str = "features",
target_key: str = "target",
loss_key: str = "loss",
augemention_prefix: str = "augment",
projection_prefix: str = "projection",
embedding_prefix: str = "embedding",
loss_mode_prefix: str = "projection",
):
"""Init."""
ISelfSupervisedRunner.__init__(
self,
input_key=input_key,
target_key=target_key,
loss_key=loss_key,
augemention_prefix=augemention_prefix,
projection_prefix=projection_prefix,
embedding_prefix=embedding_prefix,
)
Runner.__init__(self, model=model, engine=engine)
self.loss_mode_prefix = loss_mode_prefix
@torch.no_grad()
def predict_batch(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
"""
Run model inference on specified data batch.
.. warning::
You should not override this method. If you need specific model
call, override forward() method
Args:
batch: dictionary with data batch from DataLoader.
**kwargs: additional kwargs to pass to the model
Returns:
Mapping[str, Any]: model output dictionary
"""
batch = self._process_batch(batch)
output = self.forward(batch, **kwargs)
return output
def get_callbacks(self) -> "OrderedDict[str, Callback]":
"""Prepares the callbacks for selected stage.
Args:
stage: stage name
Returns:
dictionary with stage callbacks
"""
callbacks = super().get_callbacks()
callback_exists = lambda callback_fn: any(
callback_isinstance(x, callback_fn) for x in callbacks.values()
)
if isinstance(self._criterion, TorchCriterion) and not callback_exists(
ICriterionCallback
):
callbacks["_criterion"] = CriterionCallback(
input_key=f"{self.loss_mode_prefix}_left",
target_key=f"{self.loss_mode_prefix}_right",
metric_key=self._loss_key,
)
if isinstance(self._optimizer, TorchOptimizer) and not callback_exists(
IBackwardCallback
):
callbacks["_backward"] = BackwardCallback(metric_key=self._loss_key)
if isinstance(self._optimizer, TorchOptimizer) and not callback_exists(
IOptimizerCallback
):
callbacks["_optimizer"] = OptimizerCallback(metric_key=self._loss_key)
if isinstance(self._scheduler, TorchScheduler) and not callback_exists(
ISchedulerCallback
):
callbacks["_scheduler"] = SchedulerCallback(
loader_key=self._valid_loader, metric_key=self._valid_metric
)
return callbacks
__all__ = ["ISelfSupervisedRunner", "SelfSupervisedRunner"]
| SelfSupervisedRunner |
python | psf__black | tests/data/cases/class_blank_parentheses.py | {
"start": 50,
"end": 184
} | class ____ ( ):
first_test_data = 90
second_test_data = 100
def test_func(self):
return None
| ClassWithSpaceParentheses |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 11449,
"end": 11750
} | class ____(MarkdownHeader):
"""An H1 Markdown header."""
LEVEL = 1
DEFAULT_CSS = """
MarkdownH1 {
content-align: center middle;
color: $markdown-h1-color;
background: $markdown-h1-background;
text-style: $markdown-h1-text-style;
}
"""
| MarkdownH1 |
python | scipy__scipy | scipy/optimize/_dual_annealing.py | {
"start": 5336,
"end": 8776
} | class ____:
"""
Class used to record the energy state. At any time, it knows what is the
currently used coordinates and the most recent best location.
Parameters
----------
lower : array_like
A 1-D NumPy ndarray containing lower bounds for generating an initial
random components in the `reset` method.
upper : array_like
A 1-D NumPy ndarray containing upper bounds for generating an initial
random components in the `reset` method
components. Neither NaN or inf are allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
# Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rng_gen, x0=None):
"""
Initialize current location is the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = rng_gen.uniform(self.lower, self.upper,
size=len(self.lower))
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if not np.isfinite(self.current_energy):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
'Stopping algorithm because function '
'create NaN or (+/-) infinity values even with '
'trying new random parameters'
)
raise ValueError(message)
self.current_location = rng_gen.uniform(self.lower,
self.upper,
size=self.lower.size)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return ('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
| EnergyState |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_modelresource/test_resource_fields.py | {
"start": 8459,
"end": 10460
} | class ____(TestCase):
class BookResource(resources.ModelResource):
author_name = fields.Field(
attribute="author_json__name", column_name="author_name", readonly=True
)
class Meta:
model = Book
fields = ("id", "author_name")
def get_queryset(self):
return (
super()
.get_queryset()
.annotate(author_json=JSONObject(name=("author__name")))
)
def setUp(self):
self.book = Book.objects.create(name="Moonraker", price=".99")
self.resource = ModelResourceDeclarationFieldWithDictKey.BookResource()
def test_declared_field_not_readonly_and_raises_error(self):
"""
Test that when the fields with dict keys are declared and is not declared
as readonly,it can`t be imported and raises an error.
"""
self.resource.fields["author_name"].readonly = False
rows = [
(self.book.id, "New Author"),
]
dataset = tablib.Dataset(*rows, headers=["id", "author_name"])
with self.assertRaises(import_export.exceptions.ImportError) as context:
self.resource.import_data(dataset, raise_errors=True)
assert "1: 'dict' object has no attribute 'name' and no __dict__ for "
"setting new attributes (OrderedDict({'id': 1, "
"'author_name': 'New Author'}))" == str(context.exception)
def test_declared_field_readonly_and_imported(self):
"""
Test that when the fields with dict keys are declared as readonly,
it can be imported without raising an error.
For this resources this not will have any effect, but at least
it will not raise an error.
"""
rows = [
(self.book.id, "New Author"),
]
dataset = tablib.Dataset(*rows, headers=["id", "author_name"])
self.resource.import_data(dataset, raise_errors=True)
| ModelResourceDeclarationFieldWithDictKey |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/_virtualenv.py | {
"start": 1629,
"end": 5347
} | class ____:
"""A meta path finder that allows patching the imported distutils modules."""
fullname = None
# lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
# because there are gevent-based applications that need to be first to import threading by themselves.
# See https://github.com/pypa/virtualenv/issues/1895 for details.
lock = [] # noqa: RUF012
def find_spec(self, fullname, path, target=None): # noqa: ARG002
# Guard against race conditions during file rewrite by checking if _DISTUTILS_PATCH is defined.
# This can happen when the file is being overwritten while it's being imported by another process.
# See https://github.com/pypa/virtualenv/issues/2969 for details.
try:
distutils_patch = _DISTUTILS_PATCH
except NameError:
return None
if fullname in distutils_patch and self.fullname is None: # noqa: PLR1702
# initialize lock[0] lazily
if len(self.lock) == 0:
import threading # noqa: PLC0415
lock = threading.Lock()
# there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
# observing .lock as empty, and further going into hereby initialization. However due to the GIL,
# list.append() operation is atomic and this way only one of the threads will "win" to put the lock
# - that every thread will use - into .lock[0].
# https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
self.lock.append(lock)
from functools import partial # noqa: PLC0415
from importlib.util import find_spec # noqa: PLC0415
with self.lock[0]:
self.fullname = fullname
try:
spec = find_spec(fullname, path)
if spec is not None:
# https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
is_new_api = hasattr(spec.loader, "exec_module")
func_name = "exec_module" if is_new_api else "load_module"
old = getattr(spec.loader, func_name)
func = self.exec_module if is_new_api else self.load_module
if old is not func:
try: # noqa: SIM105
setattr(spec.loader, func_name, partial(func, old))
except AttributeError:
pass # C-Extension loaders are r/o such as zipimporter with <3.7
return spec
finally:
self.fullname = None
return None
@staticmethod
def exec_module(old, module):
old(module)
try:
distutils_patch = _DISTUTILS_PATCH
except NameError:
return
if module.__name__ in distutils_patch:
# patch_dist or its dependencies may not be defined during file rewrite
with contextlib.suppress(NameError):
patch_dist(module)
@staticmethod
def load_module(old, name):
module = old(name)
try:
distutils_patch = _DISTUTILS_PATCH
except NameError:
return module
if module.__name__ in distutils_patch:
# patch_dist or its dependencies may not be defined during file rewrite
with contextlib.suppress(NameError):
patch_dist(module)
return module
sys.meta_path.insert(0, _Finder())
| _Finder |
python | scikit-learn__scikit-learn | sklearn/svm/_classes.py | {
"start": 35408,
"end": 45314
} | class ____(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, default=0.5
An upper bound on the fraction of margin errors (see :ref:`User Guide
<nu_svc>`) and a lower bound of the fraction of support vectors.
Should be in the interval (0, 1].
kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
default='rbf'
Specifies the kernel type to be used in the algorithm.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix. For an intuitive
visualization of different kernel types see
:ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.
degree : int, default=3
Degree of the polynomial kernel function ('poly').
Must be non-negative. Ignored by all other kernels.
gamma : {'scale', 'auto'} or float, default='scale'
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
- if ``gamma='scale'`` (default) is passed then it uses
1 / (n_features * X.var()) as value of gamma,
- if 'auto', uses 1 / n_features
- if float, must be non-negative.
.. versionchanged:: 0.22
The default value of ``gamma`` changed from 'auto' to 'scale'.
coef0 : float, default=0.0
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : bool, default=True
Whether to use the shrinking heuristic.
See the :ref:`User Guide <shrinking_svm>`.
probability : bool, default=False
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, will slow down that method as it internally uses
5-fold cross-validation, and `predict_proba` may be inconsistent with
`predict`. Read more in the :ref:`User Guide <scores_probabilities>`.
tol : float, default=1e-3
Tolerance for stopping criterion.
cache_size : float, default=200
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, default=None
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The "balanced" mode uses the values of y to automatically
adjust weights inversely proportional to class frequencies as
``n_samples / (n_classes * np.bincount(y))``.
verbose : bool, default=False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, default=-1
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : {'ovo', 'ovr'}, default='ovr'
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one
('ovo') is always used as multi-class strategy. The parameter is
ignored for binary classification.
.. versionchanged:: 0.19
decision_function_shape is 'ovr' by default.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
break_ties : bool, default=False
If true, ``decision_function_shape='ovr'``, and number of classes > 2,
:term:`predict` will break ties according to the confidence values of
:term:`decision_function`; otherwise the first class among the tied
classes is returned. Please note that breaking ties comes at a
relatively high computational cost compared to a simple predict.
See :ref:`sphx_glr_auto_examples_svm_plot_svm_tie_breaking.py` for an
example of its usage with ``decision_function_shape='ovr'``.
.. versionadded:: 0.22
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data for
probability estimates. Ignored when `probability` is False.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
class_weight_ : ndarray of shape (n_classes,)
Multipliers of parameter C of each class.
Computed based on the ``class_weight`` parameter.
classes_ : ndarray of shape (n_classes,)
The unique classes labels.
coef_ : ndarray of shape (n_classes * (n_classes -1) / 2, n_features)
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
dual_coef_ : ndarray of shape (n_classes - 1, n_SV)
Dual coefficients of the support vector in the decision
function (see :ref:`sgd_mathematical_formulation`), multiplied by
their targets.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the :ref:`multi-class section of the User Guide
<svm_multi_class>` for details.
fit_status_ : int
0 if correctly fitted, 1 if the algorithm did not converge.
intercept_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
Constants in decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : ndarray of shape (n_classes * (n_classes - 1) // 2,)
Number of iterations run by the optimization routine to fit the model.
The shape of this attribute depends on the number of models optimized
which in turn depends on the number of classes.
.. versionadded:: 1.1
support_ : ndarray of shape (n_SV,)
Indices of support vectors.
support_vectors_ : ndarray of shape (n_SV, n_features)
Support vectors.
n_support_ : ndarray of shape (n_classes,), dtype=int32
Number of support vectors for each class.
fit_status_ : int
0 if correctly fitted, 1 if the algorithm did not converge.
probA_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
probB_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
If `probability=True`, it corresponds to the parameters learned in
Platt scaling to produce probability estimates from decision values.
If `probability=False`, it's an empty array. Platt scaling uses the
logistic function
``1 / (1 + exp(decision_value * probA_ + probB_))``
where ``probA_`` and ``probB_`` are learned from the dataset [2]_. For
more information on the multiclass case and training procedure see
section 8 of [1]_.
shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
Array dimensions of training vector ``X``.
See Also
--------
SVC : Support Vector Machine for classification using libsvm.
LinearSVC : Scalable linear Support Vector Machine for classification using
liblinear.
References
----------
.. [1] `LIBSVM: A Library for Support Vector Machines
<http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf>`_
.. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector
Machines and Comparisons to Regularized Likelihood Methods"
<https://citeseerx.ist.psu.edu/doc_view/pid/42e5ed832d4310ce4378c44d05570439df28a393>`_
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.svm import NuSVC
>>> clf = make_pipeline(StandardScaler(), NuSVC())
>>> clf.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()), ('nusvc', NuSVC())])
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
_impl = "nu_svc"
_parameter_constraints: dict = {
**BaseSVC._parameter_constraints,
"nu": [Interval(Real, 0.0, 1.0, closed="right")],
}
_parameter_constraints.pop("C")
def __init__(
self,
*,
nu=0.5,
kernel="rbf",
degree=3,
gamma="scale",
coef0=0.0,
shrinking=True,
probability=False,
tol=1e-3,
cache_size=200,
class_weight=None,
verbose=False,
max_iter=-1,
decision_function_shape="ovr",
break_ties=False,
random_state=None,
):
super().__init__(
kernel=kernel,
degree=degree,
gamma=gamma,
coef0=coef0,
tol=tol,
C=0.0,
nu=nu,
shrinking=shrinking,
probability=probability,
cache_size=cache_size,
class_weight=class_weight,
verbose=verbose,
max_iter=max_iter,
decision_function_shape=decision_function_shape,
break_ties=break_ties,
random_state=random_state,
)
| NuSVC |
python | realpython__materials | django-todo-list/source_code_final/todo_app/views.py | {
"start": 675,
"end": 896
} | class ____(CreateView):
model = ToDoList
fields = ["title"]
def get_context_data(self):
context = super().get_context_data()
context["title"] = "Add a new list"
return context
| ListCreate |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/gcs/gcs_fake_resource.py | {
"start": 1570,
"end": 2927
} | class ____:
def __init__(self):
from unittest import mock
self.buckets: dict[str, FakeGCSBucket] = {}
self.mock_extras = mock.MagicMock()
def bucket(self, bucket_name: str, *args, **kwargs):
self.mock_extras.bucket(*args, **kwargs)
if bucket_name not in self.buckets.keys():
self.buckets[bucket_name] = FakeGCSBucket(name=bucket_name)
return self.buckets[bucket_name]
def list_buckets(self, *args, **kwargs):
self.mock_extras.list_buckets(*args, **kwargs)
yield from self.buckets.values()
def list_blobs(
self,
bucket_or_name: Union[FakeGCSBucket, str],
*args,
prefix: Optional[str] = None,
**kwargs,
):
self.mock_extras.list_blobs(*args, **kwargs)
if isinstance(bucket_or_name, str):
bucket = self.bucket(bucket_or_name)
else:
bucket = bucket_or_name
for blob in self.buckets[bucket.name].blobs.values():
if prefix is None:
yield blob
elif prefix in blob.name:
yield blob
def get_all_blob_paths(self) -> AbstractSet[str]:
return {
f"{bucket.name}/{blob.name}"
for bucket in self.buckets.values()
for blob in bucket.blobs.values()
}
| FakeGCSClient |
python | getsentry__sentry | src/sentry/interfaces/user.py | {
"start": 467,
"end": 3132
} | class ____(Interface):
"""
An interface which describes the authenticated User for a request.
You should provide **at least** either an `id` (a unique identifier for
an authenticated user) or `ip_address` (their IP address).
All other attributes are optional.
>>> {
>>> "id": "unique_id",
>>> "username": "my_user",
>>> "email": "foo@example.com"
>>> "ip_address": "127.0.0.1",
>>> "optional": "value"
>>> }
"""
score = 1
display_score = 2020
@classmethod
def to_python(cls, data, **kwargs):
data = data.copy()
for key in ("id", "email", "username", "ip_address", "name", "geo", "data"):
data.setdefault(key, None)
if data["geo"] is not None:
data["geo"] = Geo.to_python(data["geo"], **kwargs)
return super().to_python(data, **kwargs)
def to_json(self):
return prune_empty_keys(
{
"id": self.id,
"email": self.email,
"username": self.username,
"ip_address": self.ip_address,
"name": self.name,
"geo": self.geo.to_json() if self.geo is not None else None,
"data": self.data or None,
}
)
def get_api_context(self, is_public=False, platform=None) -> EventUserApiContext:
return {
"id": self.id,
"email": self.email,
"username": self.username,
"ip_address": self.ip_address,
"name": self.name,
"geo": self.geo.to_json() if self.geo is not None else None,
"data": self.data,
}
def get_api_meta(self, meta, is_public=False, platform=None):
return {
"": meta.get(""),
"id": meta.get("id"),
"email": meta.get("email"),
"username": meta.get("username"),
"ip_address": meta.get("ip_address"),
"name": meta.get("name"),
"geo": meta.get("geo"),
"data": meta.get("data"),
}
def get_display_name(self):
return self.email or self.username
def get_label(self):
return self.name or self.email or self.username or self.id or self.ip_address
def to_email_html(self, event, **kwargs):
context = {
"user_id": self.id,
"user_email": self.email,
"user_username": self.username,
"user_ip_address": self.ip_address,
"user_data": self.data,
"user": self,
}
return render_to_string("sentry/partial/interfaces/user_email.html", context)
| User |
python | doocs__leetcode | solution/0800-0899/0847.Shortest Path Visiting All Nodes/Solution.py | {
"start": 0,
"end": 636
} | class ____:
def shortestPathLength(self, graph: List[List[int]]) -> int:
n = len(graph)
q = deque()
vis = set()
for i in range(n):
q.append((i, 1 << i))
vis.add((i, 1 << i))
ans = 0
while 1:
for _ in range(len(q)):
i, st = q.popleft()
if st == (1 << n) - 1:
return ans
for j in graph[i]:
nst = st | 1 << j
if (j, nst) not in vis:
vis.add((j, nst))
q.append((j, nst))
ans += 1
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0032_increase_webhook_maxsize.py | {
"start": 150,
"end": 567
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0031_add_modified_date_importedfile"),
]
operations = [
migrations.AlterField(
model_name="webhook",
name="url",
field=models.URLField(
blank=True, help_text="URL to send the webhook to", max_length=600
),
),
]
| Migration |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 4657,
"end": 5111
} | class ____:
"""
Represents a Sentry App notification action.
"""
settings: list[SentryAppFormConfigDataBlob] = dataclasses.field(default_factory=list)
@classmethod
def from_list(cls, data: list[dict[str, Any]] | None) -> "SentryAppDataBlob":
if data is None:
return cls()
return cls(settings=[SentryAppFormConfigDataBlob.from_dict(setting) for setting in data])
@dataclasses.dataclass
| SentryAppDataBlob |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operands/run_operands.py | {
"start": 891,
"end": 3079
} | class ____(SubsetAutomationCondition):
@property
def name(self) -> str:
return "executed_with_root_target"
async def compute_subset(self, context: AutomationContext) -> EntitySubset: # pyright: ignore[reportIncompatibleMethodOverride]
def _filter_fn(run_record: "RunRecord") -> bool:
if context.key == context.root_context.key:
# this happens when this is evaluated for a self-dependent asset. in these cases,
# it does not make sense to consider the asset as having been executed with itself
# as the partition key of the target is necessarily different than the partition
# key of the query key
return False
asset_selection = run_record.dagster_run.asset_selection or set()
check_selection = run_record.dagster_run.asset_check_selection or set()
return context.root_context.key in (asset_selection | check_selection)
return await context.asset_graph_view.compute_latest_run_matches_subset(
from_subset=context.candidate_subset, filter_fn=_filter_fn
)
def _run_tag_filter_fn(
run_record: "RunRecord",
tag_keys: Optional[Set[str]],
tag_values: Optional[Mapping[str, str]],
) -> bool:
if tag_keys and not all(key in run_record.dagster_run.tags for key in tag_keys):
return False
if tag_values and not all(
run_record.dagster_run.tags.get(key) == value for key, value in tag_values.items()
):
return False
return True
def _get_run_tag_filter_name(
base_name: str,
tag_keys: Optional[Set[str]],
tag_values: Optional[Mapping[str, str]],
) -> str:
props = []
name = base_name
if tag_keys is not None:
tag_key_str = ",".join(sorted(tag_keys))
props.append(f"tag_keys={{{tag_key_str}}}")
if tag_values is not None:
tag_value_str = ",".join([f"{key}:{value}" for key, value in sorted(tag_values.items())])
props.append(f"tag_values={{{tag_value_str}}}")
if props:
name += f"({', '.join(props)})"
return name
@whitelist_for_serdes
@record
| LatestRunExecutedWithRootTargetCondition |
python | ray-project__ray | python/ray/autoscaler/_private/local/node_provider.py | {
"start": 638,
"end": 4481
} | class ____:
def __init__(self, lock_path, save_path, provider_config):
self.lock = RLock()
os.makedirs(os.path.dirname(lock_path), exist_ok=True)
self.file_lock = FileLock(lock_path)
self.save_path = save_path
with self.lock:
with self.file_lock:
if os.path.exists(self.save_path):
workers = json.loads(open(self.save_path).read())
head_config = workers.get(provider_config["head_ip"])
if (
not head_config
or head_config.get("tags", {}).get(TAG_RAY_NODE_KIND)
!= NODE_KIND_HEAD
):
workers = {}
logger.info("Head IP changed - recreating cluster.")
else:
workers = {}
logger.info(
"ClusterState: Loaded cluster state: {}".format(list(workers))
)
for worker_ip in provider_config["worker_ips"]:
if worker_ip not in workers:
workers[worker_ip] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
"state": "terminated",
}
else:
assert (
workers[worker_ip]["tags"][TAG_RAY_NODE_KIND]
== NODE_KIND_WORKER
)
if provider_config["head_ip"] not in workers:
workers[provider_config["head_ip"]] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_HEAD},
"state": "terminated",
}
else:
assert (
workers[provider_config["head_ip"]]["tags"][TAG_RAY_NODE_KIND]
== NODE_KIND_HEAD
)
# Relevant when a user reduces the number of workers
# without changing the headnode.
list_of_node_ips = list(provider_config["worker_ips"])
list_of_node_ips.append(provider_config["head_ip"])
for worker_ip in list(workers):
if worker_ip not in list_of_node_ips:
del workers[worker_ip]
# Set external head ip, if provided by user.
# Necessary if calling `ray up` from outside the network.
# Refer to LocalNodeProvider.external_ip function.
external_head_ip = provider_config.get("external_head_ip")
if external_head_ip:
head = workers[provider_config["head_ip"]]
head["external_ip"] = external_head_ip
assert len(workers) == len(provider_config["worker_ips"]) + 1
with open(self.save_path, "w") as f:
logger.debug(
"ClusterState: Writing cluster state: {}".format(workers)
)
f.write(json.dumps(workers))
def get(self):
with self.lock:
with self.file_lock:
workers = json.loads(open(self.save_path).read())
return workers
def put(self, worker_id, info):
assert "tags" in info
assert "state" in info
with self.lock:
with self.file_lock:
workers = self.get()
workers[worker_id] = info
with open(self.save_path, "w") as f:
logger.info(
"ClusterState: "
"Writing cluster state: {}".format(list(workers))
)
f.write(json.dumps(workers))
| ClusterState |
python | pypa__warehouse | warehouse/subscriptions/models.py | {
"start": 1114,
"end": 1243
} | class ____(str, enum.Enum):
Month = "month"
Year = "year"
Week = "week"
Day = "day"
| StripeSubscriptionPriceInterval |
python | fluentpython__example-code | 12-inheritance/diamond.py | {
"start": 186,
"end": 411
} | class ____(B, C):
def ping(self):
super().ping()
print('post-ping:', self)
def pingpong(self):
self.ping()
super().ping()
self.pong()
super().pong()
C.pong(self)
| D |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 85856,
"end": 86377
} | class ____:
def test_required_passed_to_both_django_file_path_field_and_base(self):
field = serializers.FilePathField(
path=os.path.abspath(os.path.dirname(__file__)),
required=False,
)
assert "" in field.choices # Django adds empty choice if not required
assert field.required is False
with pytest.raises(SkipField):
field.run_validation(empty)
# Tests for SerializerMethodField.
# --------------------------------
| TestFilePathFieldRequired |
python | facelessuser__pymdown-extensions | pymdownx/caret.py | {
"start": 5329,
"end": 5529
} | class ____(util.PatternSequenceProcessor):
"""Just insert processor."""
PATTERNS = [
util.PatSeqItem(re.compile(INS, re.DOTALL | re.UNICODE), 'single', 'ins')
]
| CaretInsertProcessor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol10.py | {
"start": 737,
"end": 854
} | class ____(Protocol):
def __call__(self, a: int, /, *args: Any, k: str, **kwargs: Any) -> None:
pass
| Proto6 |
python | jazzband__django-simple-history | simple_history/tests/tests/test_deprecation.py | {
"start": 18,
"end": 356
} | class ____(unittest.TestCase):
"""Tests that check whether ``DeprecationWarning`` is raised for certain features,
and that compare ``simple_history.__version__`` against the version the features
will be removed in.
If this class is empty, it normally means that nothing is currently deprecated.
"""
| DeprecationWarningTest |
python | doocs__leetcode | solution/3200-3299/3237.Alt and Tab Simulation/Solution.py | {
"start": 0,
"end": 352
} | class ____:
def simulationResult(self, windows: List[int], queries: List[int]) -> List[int]:
s = set()
ans = []
for q in queries[::-1]:
if q not in s:
ans.append(q)
s.add(q)
for w in windows:
if w not in s:
ans.append(w)
return ans
| Solution |
python | walkccc__LeetCode | solutions/1823. Find the Winner of the Circular Game/1823-2.py | {
"start": 0,
"end": 633
} | class ____:
def findTheWinner(self, n: int, k: int) -> int:
# e.g. n = 4, k = 2.
# By using 0-indexed notation, we have the following circle:
#
# 0 -> 1 -> 2 -> 3 -> 0
# x
# 0 -> 1 -> 2 -> 0
#
# After the first round, 1 is removed.
# So, 2 becomes 0, 3 becomes 1, and 0 becomes 2.
# Let's denote that oldIndex = f(n, k) and newIndex = f(n - 1, k).
# By observation, we know f(n, k) = (f(n - 1, k) + k) % n.
def f(n: int, k: int) -> int:
if n == 1:
return 0
return (f(n - 1, k) + k) % n
# Converts back to 1-indexed.
return f(n, k) + 1
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_cloud_sql.py | {
"start": 25931,
"end": 39266
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.cloudsql_hook_no_default_project_id = CloudSQLHook(api_version="v1", gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_instance_import_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
import_method = get_conn.return_value.instances.return_value.import_
execute_method = import_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.import_instance(
project_id="example-project", instance="instance", body={}
)
import_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id="example-project", operation_name="operation_id"
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_instance_export_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
export_method = get_conn.return_value.instances.return_value.export
execute_method = export_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.export_instance(
project_id="example-project", instance="instance", body={}
)
export_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_get_instance_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
get_method = get_conn.return_value.instances.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "instance"}
wait_for_operation_to_complete.return_value = None
res = self.cloudsql_hook_no_default_project_id.get_instance(
project_id="example-project", instance="instance"
)
assert res is not None
assert res["name"] == "instance"
get_method.assert_called_once_with(instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_create_instance_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
insert_method = get_conn.return_value.instances.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.create_instance(project_id="example-project", body={})
insert_method.assert_called_once_with(body={}, project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_patch_instance_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
patch_method = get_conn.return_value.instances.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.patch_instance(
project_id="example-project", instance="instance", body={}
)
patch_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_delete_instance_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
delete_method = get_conn.return_value.instances.return_value.delete
execute_method = delete_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.delete_instance(
project_id="example-project", instance="instance"
)
delete_method.assert_called_once_with(instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project", time_to_sleep=5
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_get_database_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
get_method = get_conn.return_value.databases.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "database"}
wait_for_operation_to_complete.return_value = None
res = self.cloudsql_hook_no_default_project_id.get_database(
project_id="example-project", database="database", instance="instance"
)
assert res is not None
assert res["name"] == "database"
get_method.assert_called_once_with(
instance="instance", database="database", project="example-project"
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_create_database_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
insert_method = get_conn.return_value.databases.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.create_database(
project_id="example-project", instance="instance", body={}
)
insert_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_patch_database_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
patch_method = get_conn.return_value.databases.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.patch_database(
project_id="example-project", instance="instance", database="database", body={}
)
patch_method.assert_called_once_with(
body={}, database="database", instance="instance", project="example-project"
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_delete_database_overridden_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
delete_method = get_conn.return_value.databases.return_value.delete
execute_method = delete_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook_no_default_project_id.delete_database(
project_id="example-project", instance="instance", database="database"
)
delete_method.assert_called_once_with(
database="database", instance="instance", project="example-project"
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
def _parse_from_uri(uri: str):
connection_parameters = {}
uri_parts = urlsplit(uri)
connection_parameters["conn_type"] = uri_parts.scheme
rest_of_the_url = uri.replace(f"{uri_parts.scheme}://", "//")
uri_parts = urlsplit(rest_of_the_url)
host = unquote(uri_parts.hostname or "")
connection_parameters["host"] = host
quoted_schema = uri_parts.path[1:]
connection_parameters["schema"] = unquote(quoted_schema) if quoted_schema else ""
connection_parameters["login"] = unquote(uri_parts.username) if uri_parts.username else ""
connection_parameters["password"] = unquote(uri_parts.password) if uri_parts.password else ""
connection_parameters["port"] = uri_parts.port # type: ignore[assignment]
if uri_parts.query:
query = dict(parse_qsl(uri_parts.query, keep_blank_values=True))
connection_parameters["extra"] = json.dumps(query)
return connection_parameters
| TestGcpSqlHookNoDefaultProjectID |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 31066,
"end": 32868
} | class ____(ExplicitlyIndexedNDArrayMixin):
__slots__ = ("_copied", "array")
def __init__(self, array: duckarray[Any, Any]):
self.array = as_indexable(array)
self._copied = False
def _ensure_copied(self):
if not self._copied:
self.array = as_indexable(np.array(self.array))
self._copied = True
def get_duck_array(self):
return self.array.get_duck_array()
async def async_get_duck_array(self):
return await self.array.async_get_duck_array()
def _oindex_get(self, indexer: OuterIndexer):
return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer]))
def _vindex_get(self, indexer: VectorizedIndexer):
return type(self)(_wrap_numpy_scalars(self.array.vindex[indexer]))
def __getitem__(self, indexer: ExplicitIndexer):
self._check_and_raise_if_non_basic_indexer(indexer)
return type(self)(_wrap_numpy_scalars(self.array[indexer]))
def transpose(self, order):
return self.array.transpose(order)
def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None:
self._ensure_copied()
self.array.vindex[indexer] = value
def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None:
self._ensure_copied()
self.array.oindex[indexer] = value
def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None:
self._check_and_raise_if_non_basic_indexer(indexer)
self._ensure_copied()
self.array[indexer] = value
def __deepcopy__(self, memo):
# CopyOnWriteArray is used to wrap backend array objects, which might
# point to files on disk, so we can't rely on the default deepcopy
# implementation.
return type(self)(self.array)
| CopyOnWriteArray |
python | ray-project__ray | rllib/algorithms/dreamerv3/utils/debugging.py | {
"start": 229,
"end": 5970
} | class ____(CartPoleEnv):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
low = np.concatenate([np.array([0.0]), self.observation_space.low])
high = np.concatenate([np.array([1000.0]), self.observation_space.high])
self.observation_space = gym.spaces.Box(low, high, shape=(5,), dtype=np.float32)
self.timesteps_ = 0
self._next_action = 0
self._seed = 1
def reset(self, *, seed=None, options=None):
ret = super().reset(seed=self._seed)
self._seed += 1
self.timesteps_ = 0
self._next_action = 0
obs = np.concatenate([np.array([self.timesteps_]), ret[0]])
return obs, ret[1]
def step(self, action):
ret = super().step(self._next_action)
self.timesteps_ += 1
self._next_action = 0 if self._next_action else 1
obs = np.concatenate([np.array([self.timesteps_]), ret[0]])
reward = 0.1 * self.timesteps_
return (obs, reward) + ret[2:]
gym.register("CartPoleDebug-v0", CartPoleDebug)
cartpole_env = gym.make("CartPoleDebug-v0", render_mode="rgb_array")
cartpole_env.reset()
frozenlake_env = gym.make(
"FrozenLake-v1", render_mode="rgb_array", is_slippery=False, map_name="4x4"
) # desc=["SF", "HG"])
frozenlake_env.reset()
def create_cartpole_dream_image(
dreamed_obs, # real space (not symlog'd)
dreamed_V, # real space (not symlog'd)
dreamed_a,
dreamed_r_tp1, # real space (not symlog'd)
dreamed_ri_tp1, # intrinsic reward
dreamed_c_tp1, # continue flag
value_target, # real space (not symlog'd)
initial_h,
as_tensor=False,
):
# CartPoleDebug
if dreamed_obs.shape == (5,):
# Set the state of our env to the given observation.
cartpole_env.unwrapped.state = np.array(dreamed_obs[1:], dtype=np.float32)
# Normal CartPole-v1
else:
cartpole_env.unwrapped.state = np.array(dreamed_obs, dtype=np.float32)
# Produce an RGB-image of the current state.
rgb_array = cartpole_env.render()
# Add value-, action-, reward-, and continue-prediction information.
image = Image.fromarray(rgb_array)
draw_obj = ImageDraw.Draw(image)
# fnt = ImageFont.load_default(size=40)
draw_obj.text(
(5, 6), f"Vt={dreamed_V:.2f} (Rt={value_target:.2f})", fill=(0, 0, 0)
) # , font=fnt.font, size=30)
draw_obj.text(
(5, 18),
f"at={'<--' if dreamed_a == 0 else '-->'} ({dreamed_a})",
fill=(0, 0, 0),
)
draw_obj.text((5, 30), f"rt+1={dreamed_r_tp1:.2f}", fill=(0, 0, 0))
if dreamed_ri_tp1 is not None:
draw_obj.text((5, 42), f"rit+1={dreamed_ri_tp1:.6f}", fill=(0, 0, 0))
draw_obj.text((5, 54), f"ct+1={dreamed_c_tp1}", fill=(0, 0, 0))
draw_obj.text((5, 66), f"|h|t={np.mean(np.abs(initial_h)):.5f}", fill=(0, 0, 0))
if dreamed_obs.shape == (5,):
draw_obj.text((20, 100), f"t={dreamed_obs[0]}", fill=(0, 0, 0))
# Return image.
np_img = np.asarray(image)
if as_tensor:
return torch.from_numpy(np_img, dtype=torch.uint8)
return np_img
def create_frozenlake_dream_image(
dreamed_obs, # real space (not symlog'd)
dreamed_V, # real space (not symlog'd)
dreamed_a,
dreamed_r_tp1, # real space (not symlog'd)
dreamed_ri_tp1, # intrinsic reward
dreamed_c_tp1, # continue flag
value_target, # real space (not symlog'd)
initial_h,
as_tensor=False,
):
frozenlake_env.unwrapped.s = np.argmax(dreamed_obs, axis=0)
# Produce an RGB-image of the current state.
rgb_array = frozenlake_env.render()
# Add value-, action-, reward-, and continue-prediction information.
image = Image.fromarray(rgb_array)
draw_obj = ImageDraw.Draw(image)
draw_obj.text((5, 6), f"Vt={dreamed_V:.2f} (Rt={value_target:.2f})", fill=(0, 0, 0))
action_arrow = (
"<--"
if dreamed_a == 0
else "v"
if dreamed_a == 1
else "-->"
if dreamed_a == 2
else "^"
)
draw_obj.text((5, 18), f"at={action_arrow} ({dreamed_a})", fill=(0, 0, 0))
draw_obj.text((5, 30), f"rt+1={dreamed_r_tp1:.2f}", fill=(0, 0, 0))
if dreamed_ri_tp1 is not None:
draw_obj.text((5, 42), f"rit+1={dreamed_ri_tp1:.6f}", fill=(0, 0, 0))
draw_obj.text((5, 54), f"ct+1={dreamed_c_tp1}", fill=(0, 0, 0))
draw_obj.text((5, 66), f"|h|t={np.mean(np.abs(initial_h)):.5f}", fill=(0, 0, 0))
# Return image.
np_img = np.asarray(image)
if as_tensor:
return torch.from_numpy(np_img, dtype=torch.uint8)
return np_img
if __name__ == "__main__":
# CartPole debug.
rgb_array = create_cartpole_dream_image(
dreamed_obs=np.array([100.0, 1.0, -0.01, 1.5, 0.02]),
dreamed_V=4.3,
dreamed_a=1,
dreamed_r_tp1=1.0,
dreamed_c_tp1=True,
initial_h=0.0,
value_target=8.0,
)
# ImageFont.load("arial.pil")
image = Image.fromarray(rgb_array)
image.show()
# Normal CartPole.
rgb_array = create_cartpole_dream_image(
dreamed_obs=np.array([1.0, -0.01, 1.5, 0.02]),
dreamed_V=4.3,
dreamed_a=1,
dreamed_r_tp1=1.0,
dreamed_c_tp1=True,
initial_h=0.1,
value_target=8.0,
)
# ImageFont.load("arial.pil")
image = Image.fromarray(rgb_array)
image.show()
# Frozenlake
rgb_array = create_frozenlake_dream_image(
dreamed_obs=np.array([1.0] + [0.0] * (frozenlake_env.observation_space.n - 1)),
dreamed_V=4.3,
dreamed_a=1,
dreamed_r_tp1=1.0,
dreamed_c_tp1=True,
initial_h=0.1,
value_target=8.0,
)
image = Image.fromarray(rgb_array)
image.show()
| CartPoleDebug |
python | huggingface__transformers | src/transformers/models/video_llava/modeling_video_llava.py | {
"start": 19257,
"end": 32641
} | class ____(VideoLlavaPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
"^language_model.model": "model.language_model",
"^image_tower": "model.image_tower",
"^video_tower": "model.video_tower",
"^multi_modal_projector": "model.multi_modal_projector",
"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: VideoLlavaConfig):
super().__init__(config)
self.model = VideoLlavaModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def get_image_features(
self,
pixel_values_images: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
):
return self.model.get_image_features(
pixel_values_images=pixel_values_images,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values_images: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, VideoLlavaCausalLMOutputWithPast]:
r"""
pixel_values_images (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):
The tensors corresponding to the input images. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`VideoLlavaImageProcessor.__call__`] for details ([]`LlavaProcessor`] uses
[`VideoLlavaImageProcessor`] for processing images).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> import numpy as np
>>> import av
>>> from huggingface_hub import hf_hub_download
>>> from transformers import VideoLlavaProcessor, VideoLlavaForConditionalGeneration
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")
>>> processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")
>>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
>>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
>>> container = av.open(video_path)
>>> # sample uniformly 8 frames from the video
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 8).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> inputs = processor(text=prompt, videos=clip, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_length=80)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER: Why is this video funny? ASSISTANT: The video is funny because the baby is playing with a Wii remote while sitting on the floor, and the baby is wearing glasses.Ъ. The baby's actions are amusing because it is a young child trying to interact with a video game, which is not a typical activity for a"
>>> # to generate from image and video mix
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> prompt = [
... "USER: <image>\nHow many cats do you see? ASSISTANT:",
... "USER: <video>\nWhy is this video funny? ASSISTANT:"
... ]
>>> inputs = processor(text=prompt, images=image, videos=clip, padding=True, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_length=50)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
['USER: How many cats do you see? ASSISTANT: There are two cats visible in the image. (or three, if you count the one in the background).', 'USER: Why is this video funny? ASSISTANT: The video is funny because it shows a baby sitting on a bed and playing with a Wii remote.Ъ. The baby is holding the remote']
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids=input_ids,
pixel_values_images=pixel_values_images,
pixel_values_videos=pixel_values_videos,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return VideoLlavaCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
video_hidden_states=outputs.video_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values_images=None,
pixel_values_videos=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
model_inputs["pixel_values_images"] = pixel_values_images
model_inputs["pixel_values_videos"] = pixel_values_videos
return model_inputs
@staticmethod
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
__all__ = ["VideoLlavaPreTrainedModel", "VideoLlavaModel", "VideoLlavaForConditionalGeneration"]
| VideoLlavaForConditionalGeneration |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/vendor/pretty.py | {
"start": 3878,
"end": 4164
} | class ____:
def __init__(self, value: object):
self.value = value
def __hash__(self) -> int:
return hash((type(self), id(self.value)))
def __eq__(self, __o: object) -> bool:
return isinstance(__o, type(self)) and id(self.value) == id(__o.value)
| IDKey |
python | RaRe-Technologies__gensim | gensim/models/logentropy_model.py | {
"start": 776,
"end": 5329
} | class ____(interfaces.TransformationABC):
r"""Objects of this class realize the transformation between word-document co-occurrence matrix (int)
into a locally/globally weighted matrix (positive floats).
This is done by a log entropy normalization, optionally normalizing the resulting documents to unit length.
The following formulas explain how o compute the log entropy weight for term :math:`i` in document :math:`j`:
.. math::
local\_weight_{i,j} = log(frequency_{i,j} + 1)
P_{i,j} = \frac{frequency_{i,j}}{\sum_j frequency_{i,j}}
global\_weight_i = 1 + \frac{\sum_j P_{i,j} * log(P_{i,j})}{log(number\_of\_documents + 1)}
final\_weight_{i,j} = local\_weight_{i,j} * global\_weight_i
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import LogEntropyModel
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora import Dictionary
>>>
>>> dct = Dictionary(common_texts) # fit dictionary
>>> corpus = [dct.doc2bow(row) for row in common_texts] # convert to BoW format
>>> model = LogEntropyModel(corpus) # fit model
>>> vector = model[corpus[1]] # apply model to document
"""
def __init__(self, corpus, normalize=True):
"""
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus in BoW format.
normalize : bool, optional
If True, the resulted log entropy weighted vector will be normalized to length of 1,
If False - do nothing.
"""
self.normalize = normalize
self.n_docs = 0
self.n_words = 0
self.entr = {}
if corpus is not None:
self.initialize(corpus)
def __str__(self):
return "%s<n_docs=%s, n_words=%s>" % (self.__class__.__name__, self.n_docs, self.n_words)
def initialize(self, corpus):
"""Calculates the global weighting for all terms in a given corpus and transforms the simple
count representation into the log entropy normalized space.
Parameters
----------
corpus : iterable of iterable of (int, int)
Corpus is BoW format
"""
logger.info("calculating counts")
glob_freq = {}
glob_num_words, doc_no = 0, -1
for doc_no, bow in enumerate(corpus):
if doc_no % 10000 == 0:
logger.info("PROGRESS: processing document #%i", doc_no)
glob_num_words += len(bow)
for term_id, term_count in bow:
glob_freq[term_id] = glob_freq.get(term_id, 0) + term_count
# keep some stats about the training corpus
self.n_docs = doc_no + 1
self.n_words = glob_num_words
# and finally compute the global weights
logger.info(
"calculating global log entropy weights for %i documents and %i features (%i matrix non-zeros)",
self.n_docs, len(glob_freq), self.n_words
)
logger.debug('iterating over corpus')
# initialize doc_no2 index in case corpus is empty
doc_no2 = 0
for doc_no2, bow in enumerate(corpus):
for key, freq in bow:
p = (float(freq) / glob_freq[key]) * math.log(float(freq) / glob_freq[key])
self.entr[key] = self.entr.get(key, 0.0) + p
if doc_no2 != doc_no:
raise ValueError("LogEntropyModel doesn't support generators as training data")
logger.debug('iterating over keys')
for key in self.entr:
self.entr[key] = 1 + self.entr[key] / math.log(self.n_docs + 1)
def __getitem__(self, bow):
"""Get log entropy representation of the input vector and/or corpus.
Parameters
----------
bow : list of (int, int)
Document in BoW format.
Returns
-------
list of (int, float)
Log-entropy vector for passed `bow`.
"""
# if the input vector is in fact a corpus, return a transformed corpus
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge)
vector = [
(term_id, math.log(tf + 1) * self.entr.get(term_id))
for term_id, tf in bow
if term_id in self.entr
]
if self.normalize:
vector = matutils.unitvec(vector)
return vector
| LogEntropyModel |
python | jmcnamara__XlsxWriter | xlsxwriter/test/vml/test_write_div.py | {
"start": 289,
"end": 738
} | class ____(unittest.TestCase):
"""
Test the Vml _write_div() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_div(self):
"""Test the _write_div() method"""
self.vml._write_div("left")
exp = """<div style="text-align:left"></div>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteDiv |
python | huggingface__transformers | tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py | {
"start": 1316,
"end": 7966
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_multiple_size=4,
hidden_act="gelu",
hidden_dropout=0.0,
attention_dropout=0.1,
weight_tying=True,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
bos_token_id=1,
eos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_multiple_size = intermediate_multiple_size
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.weight_tying = weight_tying
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_labels = None
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config(self):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_multiple_size=self.intermediate_multiple_size,
hidden_act=self.hidden_act,
hidden_dropout=self.hidden_dropout,
attention_dropout=self.attention_dropout,
weight_tying=self.weight_tying,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
)
def prepare_config_and_inputs_for_decoder(self):
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
return config, input_ids, input_mask, token_labels
def create_and_check_model(self, config, input_ids, input_mask):
model = GPTNeoXJapaneseModel(config=config)
model.to(torch_device)
model.eval()
_ = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
config.add_cross_attention = True
model = GPTNeoXJapaneseModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
model = GPTNeoXJapaneseForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
config.is_decoder = True
model = GPTNeoXJapaneseForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
output_from_no_past = output_from_no_past["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, token_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| GPTNeoXJapaneseModelTester |
python | numba__numba | numba/core/compiler_machinery.py | {
"start": 2828,
"end": 2920
} | class ____(CompilerPass):
""" Base class for lowering passes
"""
pass
| LoweringPass |
python | doocs__leetcode | solution/1800-1899/1856.Maximum Subarray Min-Product/Solution.py | {
"start": 0,
"end": 721
} | class ____:
def maxSumMinProduct(self, nums: List[int]) -> int:
n = len(nums)
left = [-1] * n
right = [n] * n
stk = []
for i, x in enumerate(nums):
while stk and nums[stk[-1]] >= x:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
while stk and nums[stk[-1]] > nums[i]:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
s = list(accumulate(nums, initial=0))
mod = 10**9 + 7
return max((s[right[i]] - s[left[i] + 1]) * x for i, x in enumerate(nums)) % mod
| Solution |
python | eth-brownie__brownie | brownie/network/gas/strategies.py | {
"start": 1328,
"end": 2434
} | class ____(TimeGasStrategy):
"""
Gas strategy for linear gas price increase.
Arguments
---------
initial_gas_price : int
The initial gas price to use in the first transaction
max_gas_price : int
The maximum gas price to use
increment : float
Multiplier applied to the previous gas price in order to determine the new gas price
time_duration : int
Number of seconds between transactions
"""
def __init__(
self,
initial_gas_price: Wei,
max_gas_price: Wei,
increment: float = 1.125,
time_duration: int = 30,
):
super().__init__(time_duration)
self.initial_gas_price = Wei(initial_gas_price)
self.max_gas_price = Wei(max_gas_price)
self.increment = increment
def get_gas_price(self) -> Generator[Wei, None, None]:
last_gas_price = self.initial_gas_price
yield last_gas_price
while True:
last_gas_price = min(Wei(last_gas_price * self.increment), self.max_gas_price)
yield last_gas_price
| LinearScalingStrategy |
python | pyca__cryptography | tests/hazmat/primitives/test_ssh.py | {
"start": 43312,
"end": 44932
} | class ____:
def test_load_ssh_public_key(self, backend):
ssh_key = (
b"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG2fgpmpYO61qeAxGd0wgRaN/E4"
b"GR+xWvBmvxjxrB1vG user@chiron.local"
)
key = load_ssh_public_key(ssh_key, backend)
assert isinstance(key, ed25519.Ed25519PublicKey)
assert key.public_bytes(Encoding.Raw, PublicFormat.Raw) == (
b"m\x9f\x82\x99\xa9`\xee\xb5\xa9\xe01\x19\xdd0\x81\x16\x8d\xfc"
b"N\x06G\xecV\xbc\x19\xaf\xc6<k\x07[\xc6"
)
def test_public_bytes_openssh(self, backend):
ssh_key = (
b"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG2fgpmpYO61qeAxGd0wgRaN/E4"
b"GR+xWvBmvxjxrB1vG"
)
key = load_ssh_public_key(ssh_key, backend)
assert isinstance(key, ed25519.Ed25519PublicKey)
assert (
key.public_bytes(Encoding.OpenSSH, PublicFormat.OpenSSH) == ssh_key
)
def test_load_ssh_public_key_not_32_bytes(self, backend):
ssh_key = (
b"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI22fgpmpYO61qeAxGd0wgRaN/E4"
b"GR+xWvBmvxjxrB1vGaGVs user@chiron.local"
)
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
def test_load_ssh_public_key_trailing_data(self, backend):
ssh_key = (
b"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG2fgpmpYO61qeAxGd0wgRa"
b"N/E4GR+xWvBmvxjxrB1vGdHJhaWxpbmdkYXRh user@chiron.local"
)
with pytest.raises(ValueError):
load_ssh_public_key(ssh_key, backend)
| TestEd25519SSHSerialization |
python | pytorch__pytorch | test/functorch/test_eager_transforms.py | {
"start": 57339,
"end": 79794
} | class ____(VmapTearDownMixin, TestCase):
@jacrev_and_jacfwd
def test_simple(self, device, jacapi):
x = torch.randn(3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.cos())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_simple_not_flat(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.view(-1).cos())
expected = expected.view(2, 3, 2, 3)
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_take(self, device, jacapi):
x = torch.rand(5)
def func(x):
y = torch.ones(3, dtype=torch.long)
z = torch.take(x, y)
return z
self.assertEqual(jacrev(func)(x), torch.autograd.functional.jacobian(func, x))
@jacrev_and_jacfwd
def test_diff_numel(self, device, jacapi):
x = torch.randn(2, 4, device=device)
# Tensor[2, 4] -> Tensor[3, 1]
def f(x):
return x[0, 1:].unsqueeze(-1)
y = jacapi(f)(x)
self.assertEqual(y.shape, (3, 1, 2, 4))
expected = x.new_zeros(3, 1, 2, 4)
expected[0, 0, 0, 1] = 1
expected[1, 0, 0, 2] = 1
expected[2, 0, 0, 3] = 1
self.assertEqual(y, expected)
@jacrev_and_jacfwd
def test_vmap_on_jac_simple(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = vmap(jacapi(torch.sin))(x)
expected = torch.stack([torch.diagflat(x[i].cos()) for i in range(2)])
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_nested_jac_simple(self, device, jacapi):
def foo(x):
return x.sin().sum()
x = torch.randn(3, device=device)
y = jacapi(jacapi(foo))(x)
expected = torch.diagflat(-x.sin())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_multiple_args(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=1)(x, y)
expected = torch.diagflat(x)
assert torch.allclose(z, expected)
@jacrev_and_jacfwd
def test_multiple_outputs_multiple_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out0_y = torch.diagflat(torch.full_like(y, 3))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
expected_out1_y = torch.diagflat(torch.full_like(y, 5))
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(len(z[0]), 2)
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z[0][0], expected_out0_x)
self.assertEqual(z[0][1], expected_out0_y)
self.assertEqual(z[1][0], expected_out1_x)
self.assertEqual(z[1][1], expected_out1_y)
@jacrev_and_jacfwd
def test_multiple_outputs_single_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
z = jacapi(f, argnums=0)(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(z, (expected_out0_x, expected_out1_x))
z = jacapi(f, argnums=(0,))(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z, ((expected_out0_x,), (expected_out1_x,)))
@jacrev_and_jacfwd
def test_multiple_outputs_pytree(self, device, jacapi):
def f(x, y):
return {"left": 2 * x + 3 * y, "right": 4 * x + 5 * y}
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_left_x = torch.diagflat(torch.full_like(x, 2))
expected_left_y = torch.diagflat(torch.full_like(y, 3))
expected_right_x = torch.diagflat(torch.full_like(x, 4))
expected_right_y = torch.diagflat(torch.full_like(y, 5))
expected = {
"left": (expected_left_x, expected_left_y),
"right": (expected_right_x, expected_right_y),
}
self.assertTrue(isinstance(z, dict))
self.assertTrue(isinstance(z["left"], tuple))
self.assertTrue(isinstance(z["right"], tuple))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_multiple_inputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2 + b * 3 + c * 4
x = torch.randn([], device=device)
args = ((x, x), x, x)
result = jacapi(f, argnums=(0, 1, 2))(*args)
expected = (
(torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
torch.tensor(3.0, device=device),
torch.tensor(4.0, device=device),
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = (
(torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
)
self.assertEqual(result, expected)
result = jacapi(f)(*args)
expected = (torch.tensor(1.0, device=device), torch.tensor(2.0, device=device))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_dimensionality(self, device, jacapi):
def f(x):
return x
x = torch.randn([], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 0)
self.assertEqual(result, torch.ones_like(x))
x = torch.randn([1], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 2)
self.assertEqual(result, x.new_ones(1, 1))
@jacrev_and_jacfwd
def test_aux_tensor(self, device, jacapi):
def f(x):
y = x.clone()
return y, y.cos()
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
self.assertEqual(aux, x.cos())
@jacrev_and_jacfwd
def test_aux_pytree(self, device, jacapi):
def f(x):
y = x.clone()
return y, {"a": y.cos(), "b": [y.tan()]}
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"Expected tensors, got unsupported type"
):
_ = jacapi(lambda x: (x, aux), has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError, r"Expected tensors, got unsupported type"
):
_ = jacapi(lambda x: (x, [x, aux]), has_aux=True)(x)
@jacrev_and_jacfwd
def test_outputs_can_any_pytree(self, device, jacapi):
x = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError,
r"(vjp|jvp).+: Expected f to be a function that has non-empty output",
):
jacapi(lambda _: output)(x)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError,
r"(vjp|jvp).+: expected f\(\*primals\) to return only tensors",
):
jacapi(lambda _: output)(x)
# Check list output
out = jacapi(lambda x: [x, x.sum()])(x)
assert isinstance(out, list) and len(out) == 2
# Check dict output
out = jacapi(lambda x: {"x": x, "xsum": x.sum()})(x)
assert isinstance(out, dict) and len(out) == 2 and "xsum" in out
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
out = jacapi(composite_output)(x)
assert isinstance(out, list)
assert isinstance(out[0], tuple) and isinstance(out[0][1], dict)
@jacrev_and_jacfwd
def test_multiple_inputs_outputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2, {"foo": b * 3 + c * 4}
x = torch.randn([], device=device)
zero = torch.zeros([], device=device)
args = ((x, x), x, x)
result = jacapi(f)(*args)
expected = (
(torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
{"foo": (zero, zero)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = (
((torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),),
{"foo": ((zero, zero),)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0, 1))(*args)
expected = (
(
(torch.tensor(1.0, device=device), torch.tensor(2.0, device=device)),
zero,
),
{"foo": ((zero, zero), torch.tensor(3.0, device=device))},
)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_multiple_inputs_outputs_pytree_multidim(self, device, jacapi):
def f(dct):
a = dct["a"]
b = dct["b"]
return {"c": a.sin(), "d": b.cos()}
x = torch.randn(3, device=device)
args = ({"a": x, "b": x},)
result = jacapi(f)(*args)
expected = {
"c": {"a": x.cos().diagflat(), "b": x.new_zeros(3, 3)},
"d": {"a": x.new_zeros(3, 3), "b": -x.sin().diagflat()},
}
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_input(self, device, jacapi):
def f(x, y):
return x
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
result = jacapi(f, argnums=(0, 1))(x, y)
expected0 = torch.eye(6, 6, device=device).view(2, 3, 2, 3)
expected1 = y.new_zeros(2, 3, 2, 3)
expected = (expected0, expected1)
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_output(self, device, jacapi):
y = torch.randn(2, 3, device=device)
def f(x):
return y
x = torch.randn(2, 3, device=device)
result = jacapi(f)(x)
expected = x.new_zeros(2, 3, 2, 3)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_empty_output(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
def f(x, y):
return ()
with self.assertRaisesRegex(RuntimeError, "xpected"):
jacapi(f)(x, y)
@jacrev_and_jacfwd
def test_argnums_tuple(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=(0, 1))(x, y)
expected0 = torch.diagflat(y)
expected1 = torch.diagflat(x)
assert len(z) == 2
assert torch.allclose(z[0], expected0)
assert torch.allclose(z[1], expected1)
@jacrev_and_jacfwd
def test_argnums_effect_on_return(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=(0,))(x, y)
expected0 = torch.diagflat(y)
assert isinstance(z, tuple)
assert len(z) == 1
assert torch.allclose(z[0], expected0)
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=0)(x, y)
expected0 = torch.diagflat(y)
assert isinstance(z, torch.Tensor)
assert torch.allclose(z, expected0)
@jacrev_and_jacfwd
def test_argnums_defaults_to_zero(self, device, jacapi):
def f(x, y):
return x * 2 + y * 3
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f)(x, y)
expected = torch.diagflat(torch.full_like(x, 2))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_empty_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
jacapi(torch.sin, argnums=())(x)
@jacrev_and_jacfwd
def test_out_of_bounds_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "only 1 positional inputs"):
jacapi(torch.sin, argnums=2)(x)
@jacrev_and_jacfwd
def test_negative_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "only 1 positional inputs"):
jacapi(torch.sin, argnums=-2)(x)
@jacrev_and_jacfwd
def test_repeated_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be unique"):
jacapi(torch.sin, argnums=(0, 0))(x)
@jacrev_and_jacfwd
def test_float_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be int or Tuple"):
jacapi(torch.sin, argnums=0.0)(x)
with self.assertRaisesRegex(RuntimeError, "must be int"):
jacapi(torch.multiply, argnums=(1, 0.0))(x, x)
def test_hessian_simple(self, device):
def f(x):
return x.sin()
x = torch.randn(3, device=device)
hessian(f)(x)
def _test_against_reference(self, f, inputs, jacapi):
def foo(inputs):
return f(*inputs)
expected = torch.autograd.functional.jacobian(f, inputs)
result = jacapi(foo)(inputs)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_against_reference_simple(self, device, jacapi):
def f(x):
return 3 * x**2
x = torch.randn(2, 3, 5, device=device)
self._test_against_reference(f, (x,), jacapi)
@jacrev_and_jacfwd
def test_against_reference_multi_input(self, device, jacapi):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3, device=device)
y = torch.randn(3, 5, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_multi_input_multi_output(self, device, jacapi):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3, device=device)
y = torch.randn(3, 5, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_unrelated_outputs(self, device, jacapi):
def f(x, y):
return x, y, x, y
x = torch.randn(2, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_zero_dim(self, device, jacapi):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y), jacapi)
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([], device=device)
self._test_against_reference(g, (x,), jacapi)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([], device=device)
y = torch.randn(1, device=device)
self._test_against_reference(h, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_correctness_different_devices(self, device, jacapi):
def f(x, y):
return x * y, (x * y).to(device=device)
x = torch.randn(3)
y = torch.randn(3)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_default_arg(self, device, jacapi):
def f(x, y, z=3.0):
return x * y * z
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_inplace(self, device, jacapi):
def f(x, y):
y.copy_(x)
return y
out = jacapi(f, argnums=0) # x is differentiable
x, y = torch.randn(2, device=device), torch.randn(2, device=device)
self.assertEqual(out(x, y), torch.eye(y.shape[0]))
# testing tuple of argnums with the example that raised this issue originally
def g(x, y, z):
x[:2] = y
return torch.vstack([(x**2).sum(), (z**3).sum()])
out = jacapi(g, argnums=(1, 2))
x, y, z = (
torch.randn(3, device=device),
torch.randn(2, device=device),
torch.randn(2, device=device),
)
expected_out = (
torch.zeros(2, 1, 2, device=device),
torch.zeros(2, 1, 2, device=device),
)
expected_out[0][0][0] = 2 * y # top left corner
expected_out[1][1][0] = 3 * (z**2) # bottom right corner
out_val = out(x, y, z)
self.assertEqual(out_val, expected_out)
@parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev(self, device, _preallocate_and_copy):
x = torch.randn(10, 2, device=device)
y = torch.randn(1, 2, device=device)
def f(x, y):
return (x.sin(), x + y), (x + 2, x.sum())
for chunk_size in (1, 2, 3, 4, 7, 10, 1000):
expected = jacrev(f, argnums=(0, 1))(x, y)
actual = jacrev(
f,
argnums=(0, 1),
chunk_size=chunk_size,
_preallocate_and_copy=_preallocate_and_copy,
)(x, y)
self.assertEqual(actual, expected)
err_msg = "jacrev: `chunk_size` should be greater than 0."
with self.assertRaisesRegex(ValueError, err_msg):
jacrev(f, argnums=(0,), chunk_size=0)(x, y)
with self.assertRaisesRegex(ValueError, err_msg):
jacrev(f, argnums=(0,), chunk_size=-2)(x, y)
@parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev_composition(self, device, _preallocate_and_copy):
x = torch.randn(10, 2, device=device)
chunk_size = 3
def f(x):
return (x.sin(), x), (x + 2, x.sum())
expected = vmap(jacrev(jacrev(f)))(x)
actual = vmap(
jacrev(
jacrev(
f,
chunk_size=chunk_size,
_preallocate_and_copy=_preallocate_and_copy,
),
chunk_size=chunk_size,
)
)(x)
self.assertEqual(actual, expected)
# https://github.com/pytorch/pytorch/issues/127036
@xfailIfTorchDynamo
@parametrize("_preallocate_and_copy", (True, False))
def test_chunk_jacrev_chunksize_one(self, device, _preallocate_and_copy):
# With chunk_size=1, we shouldn't `vmap` and hence not be limited
# by it's constraints.
x = torch.randn(3, 3, device=device)
# Function with Dynamic Op in Backward.
# This should cause jacrev/vmap(vjp) to fail.
class IdentityWithDynamicBackwardOp(torch.autograd.Function):
@staticmethod
def forward(input):
return input
@staticmethod
def setup_context(ctx, inputs, output):
pass
@staticmethod
def backward(ctx, grad_output):
# dynamic op in backward pass.
grad_output.nonzero()
return grad_output
def f(x):
return IdentityWithDynamicBackwardOp.apply(x)
# With `chunk_size=1`, we don't use vmap. So the following should work.
jacfn = jacrev(f, chunk_size=1, _preallocate_and_copy=_preallocate_and_copy)
actual = jacfn(x)
expected = torch.autograd.functional.jacobian(f, x, vectorize=False)
self.assertEqual(actual, expected)
# Should fail with `chunk_size=2`.
msg = (
r"vmap: We do not support batching operators that can output dynamic shape."
)
with self.assertRaisesRegex(RuntimeError, msg):
jacrev(f, chunk_size=2, _preallocate_and_copy=_preallocate_and_copy)(x)
def test_complex_error(self, device):
# Verify complex input raises error
# C -> C
def fn(x):
return x.conj()
x = torch.randn(1, device=device, dtype=torch.cfloat)
with self.assertRaisesRegex(RuntimeError, "jacrev: Expected all inputs"):
jacrev(fn)(x)
with self.assertRaisesRegex(RuntimeError, "jacfwd: Expected all inputs"):
jacfwd(fn)(x)
# Verify complex output raises error
# R -> C
def fn(x):
return torch.conj(x * 0.5j)
x = torch.randn(1, device=device, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, "jacrev: Expected all outputs"):
jacrev(fn)(x)
with self.assertRaisesRegex(RuntimeError, "jacfwd: Expected all outputs"):
jacfwd(fn)(x)
@jacrev_and_jacfwd
def test_jac_with_non_tensor_args(self, device, jacapi):
def f(t, int_x):
return t + int_x
t = torch.randn(3, 3, device=device)
actual = jacapi(f)(t, 3)
expected = torch.autograd.functional.jacobian(partial(f, int_x=3), t)
self.assertEqual(actual, expected)
@markDynamoStrictTest
| TestJac |
python | getsentry__sentry | src/sentry/relocation/api/endpoints/details.py | {
"start": 574,
"end": 1417
} | class ____(Endpoint):
owner = ApiOwner.HYBRID_CLOUD
publish_status = {
# TODO(getsentry/team-ospo#214): Stabilize before GA.
"GET": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (SuperuserOrStaffFeatureFlaggedPermission,)
def get(self, request: Request, relocation_uuid: str) -> Response:
"""
Get a single relocation.
``````````````````````````````````````````````````
:pparam string relocation_uuid: a UUID identifying the relocation.
:auth: required
"""
logger.info("relocations.details.get.start", extra={"caller": request.user.id})
try:
return self.respond(serialize(Relocation.objects.get(uuid=relocation_uuid)))
except Relocation.DoesNotExist:
raise ResourceDoesNotExist
| RelocationDetailsEndpoint |
python | huggingface__transformers | src/transformers/models/glm4_moe/modeling_glm4_moe.py | {
"start": 25633,
"end": 28757
} | class ____(Glm4MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Glm4MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, Glm4MoeForCausalLM
>>> model = Glm4MoeForCausalLM.from_pretrained("meta-glm4_moe/Glm4Moe-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-glm4_moe/Glm4Moe-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["Glm4MoePreTrainedModel", "Glm4MoeModel", "Glm4MoeForCausalLM"]
| Glm4MoeForCausalLM |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 68769,
"end": 71852
} | class ____:
def test_basic(self, xp):
actual = signal.order_filter(xp.asarray([1, 2, 3]), xp.asarray([1, 0, 1]), 1)
expect = xp.asarray([2, 3, 2])
xp_assert_equal(actual, expect)
def test_doc_example(self, xp):
x = xp.reshape(xp.arange(25, dtype=xp_default_dtype(xp)), (5, 5))
domain = xp.eye(3, dtype=xp_default_dtype(xp))
# minimum of elements 1,3,9 (zero-padded) on phone pad
# 7,5,3 on numpad
expected = xp.asarray(
[[0., 0., 0., 0., 0.],
[0., 0., 1., 2., 0.],
[0., 5., 6., 7., 0.],
[0., 10., 11., 12., 0.],
[0., 0., 0., 0., 0.]],
dtype=xp_default_dtype(xp)
)
xp_assert_close(signal.order_filter(x, domain, 0), expected)
# maximum of elements 1,3,9 (zero-padded) on phone pad
# 7,5,3 on numpad
expected = xp.asarray(
[[6., 7., 8., 9., 4.],
[11., 12., 13., 14., 9.],
[16., 17., 18., 19., 14.],
[21., 22., 23., 24., 19.],
[20., 21., 22., 23., 24.]],
)
xp_assert_close(signal.order_filter(x, domain, 2), expected)
# and, just to complete the set, median of zero-padded elements
expected = xp.asarray(
[[0, 1, 2, 3, 0],
[5, 6, 7, 8, 3],
[10, 11, 12, 13, 8],
[15, 16, 17, 18, 13],
[0, 15, 16, 17, 18]],
dtype=xp_default_dtype(xp)
)
xp_assert_close(signal.order_filter(x, domain, 1), expected)
@xfail_xp_backends('dask.array', reason='repeat requires an axis')
@xfail_xp_backends('torch', reason='array-api-compat#292')
@make_xp_test_case(signal.medfilt)
def test_medfilt_order_filter(self, xp):
x = xp.reshape(xp.arange(25), (5, 5))
# median of zero-padded elements 1,5,9 on phone pad
# 7,5,3 on numpad
expected = xp.asarray(
[[0, 1, 2, 3, 0],
[1, 6, 7, 8, 4],
[6, 11, 12, 13, 9],
[11, 16, 17, 18, 14],
[0, 16, 17, 18, 0]],
)
xp_assert_close(signal.medfilt(x, 3), expected)
xp_assert_close(
signal.order_filter(x, xp.ones((3, 3)), 4),
expected
)
def test_order_filter_asymmetric(self, xp):
x = xp.reshape(xp.arange(25), (5, 5))
domain = xp.asarray(
[[1, 1, 0],
[0, 1, 0],
[0, 0, 0]],
)
expected = xp.asarray(
[[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 5, 6, 7, 8],
[0, 10, 11, 12, 13],
[0, 15, 16, 17, 18]]
)
xp_assert_close(signal.order_filter(x, domain, 0), expected)
expected = xp.asarray(
[[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]]
)
xp_assert_close(signal.order_filter(x, domain, 1), expected)
@make_xp_test_case(lfilter)
| TestOrderFilt |
python | google__jax | tests/array_test.py | {
"start": 62829,
"end": 66138
} | class ____(jtu.JaxTestCase):
# tests that the PRNGs are automatically sharded as expected
@parameterized.named_parameters(("3", 3), ("4", 4), ("5", 5))
@jtu.skip_on_devices("gpu")
def test_random_bits_is_pure_map_1d(self, num_devices):
@jax.jit
def f(x):
bits = prng.threefry_random_bits(jnp.array([0, 0], dtype='uint32'),
32, x.shape)
return bits + x
mesh = jtu.create_mesh((num_devices,), ('x',), iota_order=True)
s = jax.sharding.NamedSharding(mesh, P('x'))
n = num_devices ** 2
global_x = jnp.arange(n).astype('uint32')
x = array.make_array_from_callback(global_x.shape, s, lambda i: global_x[i])
# check computation is fully partitioned and without any communication
with jax.threefry_partitionable(True):
unopt_txt = f.lower(x).as_text(dialect='hlo')
opt_txt = f.lower(x).compile().as_text()
self.assertIn( f'[{n}]', unopt_txt)
self.assertNotIn(f'[{n}]', opt_txt)
self.assertNotIn('all-reduce', opt_txt)
self.assertNotIn('collective-permute', opt_txt)
# check against single-device reference
y = f(x)
y_ref1 = f(jax.device_put(x, jax.devices()[0]))
self.assertArraysEqual(y, y_ref1)
@parameterized.named_parameters(
{"testcase_name": f"_{mesh_shape}_{pspec}",
"mesh_shape": mesh_shape, "pspec": pspec}
for mesh_shape in [(3, 2), (4, 2), (2, 3)]
for pspec in [P('x', None), P(None, 'y'), P('x', 'y')])
@jtu.skip_on_devices("gpu")
def test_random_bits_is_pure_map_2d(self, mesh_shape, pspec):
@jax.jit
def f(x):
bits = prng.threefry_random_bits(jnp.array([0, 0], dtype='uint32'),
32, x.shape)
return bits + x
global_shape = tuple(np.square(mesh_shape))
mesh = jtu.create_mesh(mesh_shape, ('x', 'y'), iota_order=True)
s = jax.sharding.NamedSharding(mesh, pspec)
n = math.prod(global_shape)
global_x = np.arange(n).astype('uint32').reshape(global_shape)
x = array.make_array_from_callback(global_x.shape, s, lambda i: global_x[i])
# check computation is fully partitioned and without any communication
with jax.threefry_partitionable(True):
unopt_txt = f.lower(x).as_text(dialect='hlo')
opt_txt = f.lower(x).compile().as_text()
global_shape_fmt = ','.join(str(x) for x in global_shape)
self.assertIn( f'[{global_shape_fmt}]', unopt_txt)
self.assertNotIn(f'[{global_shape_fmt}]', opt_txt)
self.assertNotIn('all-reduce', opt_txt)
self.assertNotIn('collective-permute', opt_txt)
# check against single-device reference
y = f(x)
y_ref1 = f(jax.device_put(x, jax.devices()[0]))
self.assertArraysEqual(y, y_ref1)
def test_empty_mesh_creation(self):
mesh = jax.sharding.Mesh(devices=np.empty((), dtype=object), axis_names=[])
self.assertTrue(mesh.empty)
self.assertEqual(mesh.size, 0)
abstract_mesh = mesh.abstract_mesh
self.assertTrue(abstract_mesh.empty)
self.assertEqual(abstract_mesh.size, 0)
abstract_mesh2 = jax.sharding.AbstractMesh((), ())
self.assertTrue(abstract_mesh2.empty)
self.assertEqual(abstract_mesh2.size, 0)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| RngShardingTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/looker.py | {
"start": 1200,
"end": 4077
} | class ____(GoogleCloudBaseOperator):
"""
Submits a PDT materialization job to Looker.
:param looker_conn_id: Required. The connection ID to use connecting to Looker.
:param model: Required. The model of the PDT to start building.
:param view: Required. The view of the PDT to start building.
:param query_params: Optional. Additional materialization parameters.
:param asynchronous: Optional. Flag indicating whether to wait for the job
to finish or return immediately.
This is useful for submitting long running jobs and
waiting on them asynchronously using the LookerCheckPdtBuildSensor
:param cancel_on_kill: Optional. Flag which indicates whether cancel the
hook's job or not, when on_kill is called.
:param wait_time: Optional. Number of seconds between checks for job to be
ready. Used only if ``asynchronous`` is False.
:param wait_timeout: Optional. How many seconds wait for job to be ready.
Used only if ``asynchronous`` is False.
"""
def __init__(
self,
looker_conn_id: str,
model: str,
view: str,
query_params: dict | None = None,
asynchronous: bool = False,
cancel_on_kill: bool = True,
wait_time: int = 10,
wait_timeout: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model = model
self.view = view
self.query_params = query_params
self.looker_conn_id = looker_conn_id
self.asynchronous = asynchronous
self.cancel_on_kill = cancel_on_kill
self.wait_time = wait_time
self.wait_timeout = wait_timeout
self.hook: LookerHook | None = None
self.materialization_id: str | None = None
def execute(self, context: Context) -> str:
self.hook = LookerHook(looker_conn_id=self.looker_conn_id)
resp = self.hook.start_pdt_build(
model=self.model,
view=self.view,
query_params=self.query_params,
)
self.materialization_id = resp.materialization_id
if not self.materialization_id:
raise AirflowException(
f"No `materialization_id` was returned for model: {self.model}, view: {self.view}."
)
self.log.info("PDT materialization job submitted successfully. Job id: %s.", self.materialization_id)
if not self.asynchronous:
self.hook.wait_for_job(
materialization_id=self.materialization_id,
wait_time=self.wait_time,
timeout=self.wait_timeout,
)
return self.materialization_id
def on_kill(self):
if self.materialization_id and self.cancel_on_kill:
self.hook.stop_pdt_build(materialization_id=self.materialization_id)
| LookerStartPdtBuildOperator |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_inlinehilite.py | {
"start": 8155,
"end": 9527
} | class ____(util.MdCase):
"""Test inline highlight with CodeHilite."""
extension = [
'markdown.extensions.codehilite',
'pymdownx.inlinehilite',
]
extension_configs = {
'markdown.extensions.codehilite': {
'guess_lang': False
},
'pymdownx.inlinehilite': {
'style_plain_text': True
}
}
def test_codehilite(self):
"""Test CodeHilite."""
# Test #! original syntax
self.check_markdown(
r'`#!python import module`.',
r'<p><code class="highlight"><span class="kn">import</span><span class="w"> </span><span class="nn">module</span></code>.</p>' # noqa: E501
)
# Test ::: syntax
self.check_markdown(
r'`:::python import module`.',
r'<p><code class="highlight"><span class="kn">import</span><span class="w"> </span><span class="nn">module</span></code>.</p>' # noqa: E501
)
# Test escaping language with space
self.check_markdown(
r'` #!python import module`.',
r'<p><code class="highlight">#!python import module</code>.</p>'
)
# Test bad language
self.check_markdown(
r'`#!bad import module`.',
r'<p><code class="highlight">import module</code>.</p>'
)
| TestInlineHiliteCodeHilite |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 60807,
"end": 62776
} | class ____(TestCase):
"""Tests for ``padded()``"""
def test_no_n(self):
seq = [1, 2, 3]
# No fillvalue
self.assertEqual(mi.take(5, mi.padded(seq)), [1, 2, 3, None, None])
# With fillvalue
self.assertEqual(
mi.take(5, mi.padded(seq, fillvalue='')), [1, 2, 3, '', '']
)
def test_invalid_n(self):
self.assertRaises(ValueError, lambda: list(mi.padded([1, 2, 3], n=-1)))
self.assertRaises(ValueError, lambda: list(mi.padded([1, 2, 3], n=0)))
def test_valid_n(self):
seq = [1, 2, 3, 4, 5]
# No need for padding: len(seq) <= n
self.assertEqual(list(mi.padded(seq, n=4)), [1, 2, 3, 4, 5])
self.assertEqual(list(mi.padded(seq, n=5)), [1, 2, 3, 4, 5])
# No fillvalue
self.assertEqual(
list(mi.padded(seq, n=7)), [1, 2, 3, 4, 5, None, None]
)
# With fillvalue
self.assertEqual(
list(mi.padded(seq, fillvalue='', n=7)), [1, 2, 3, 4, 5, '', '']
)
def test_next_multiple(self):
seq = [1, 2, 3, 4, 5, 6]
# No need for padding: len(seq) % n == 0
self.assertEqual(
list(mi.padded(seq, n=3, next_multiple=True)), [1, 2, 3, 4, 5, 6]
)
# Padding needed: len(seq) < n
self.assertEqual(
list(mi.padded(seq, n=8, next_multiple=True)),
[1, 2, 3, 4, 5, 6, None, None],
)
# No padding needed: len(seq) == n
self.assertEqual(
list(mi.padded(seq, n=6, next_multiple=True)), [1, 2, 3, 4, 5, 6]
)
# Padding needed: len(seq) > n
self.assertEqual(
list(mi.padded(seq, n=4, next_multiple=True)),
[1, 2, 3, 4, 5, 6, None, None],
)
# With fillvalue
self.assertEqual(
list(mi.padded(seq, fillvalue='', n=4, next_multiple=True)),
[1, 2, 3, 4, 5, 6, '', ''],
)
| PaddedTest |
python | viewflow__viewflow | viewflow/contrib/admin/apps.py | {
"start": 302,
"end": 444
} | class ____(AppConfig):
"""Default application config."""
name = "viewflow.contrib.admin"
label = "viewflow_admin"
| ViewflowAdminConfig |
python | PyCQA__pylint | tests/functional/r/regression/regression_9865_calling_bound_lambda.py | {
"start": 160,
"end": 266
} | class ____:
eq = lambda self, y: self == y
def test_lambda_method():
ret = C().eq(1)
return ret
| C |
python | bokeh__bokeh | src/bokeh/models/mappers.py | {
"start": 3348,
"end": 4905
} | class ____(Mapper):
''' Base class for mappers that map categorical factors to other values.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
factors = FactorSeq(help="""
A sequence of factors / categories that map to the some target range. For
example the following color mapper:
.. code-block:: python
mapper = CategoricalColorMapper(palette=["red", "blue"], factors=["foo", "bar"])
will map the factor ``"foo"`` to red and the factor ``"bar"`` to blue.
""")
start = Int(default=0, help="""
A start index to "slice" data factors with before mapping.
For example, if the data to color map consists of 2-level factors such
as ``["2016", "sales"]`` and ``["2016", "marketing"]``, then setting
``start=1`` will perform color mapping only based on the second sub-factor
(i.e. in this case based on the department ``"sales"`` or ``"marketing"``)
""")
end = Nullable(Int, help="""
A start index to "slice" data factors with before mapping.
For example, if the data to color map consists of 2-level factors such
as ``["2016", "sales"]`` and ``["2017", "marketing"]``, then setting
``end=1`` will perform color mapping only based on the first sub-factor
(i.e. in this case based on the year ``"2016"`` or ``"2017"``)
If ``None`` then all sub-factors from ``start`` to the end of the
factor will be used for color mapping.
""")
| CategoricalMapper |
python | ray-project__ray | python/ray/serve/tests/test_util.py | {
"start": 2543,
"end": 8207
} | class ____:
def test_merge_empty(self):
assert {"env_vars": {}} == override_runtime_envs_except_env_vars({}, {})
def test_merge_empty_parent(self):
child = {"env_vars": {"test1": "test_val"}, "working_dir": "."}
assert child == override_runtime_envs_except_env_vars({}, child)
def test_merge_empty_child(self):
parent = {"env_vars": {"test1": "test_val"}, "working_dir": "."}
assert parent == override_runtime_envs_except_env_vars(parent, {})
@pytest.mark.parametrize("invalid_env", [None, 0, "runtime_env", set()])
def test_invalid_type(self, invalid_env):
with pytest.raises(TypeError):
override_runtime_envs_except_env_vars(invalid_env, {})
with pytest.raises(TypeError):
override_runtime_envs_except_env_vars({}, invalid_env)
with pytest.raises(TypeError):
override_runtime_envs_except_env_vars(invalid_env, invalid_env)
def test_basic_merge(self):
parent = {
"py_modules": ["http://test.com/test0.zip", "s3://path/test1.zip"],
"working_dir": "gs://path/test2.zip",
"env_vars": {"test": "val", "trial": "val2"},
"pip": ["pandas", "numpy"],
"excludes": ["my_file.txt"],
}
original_parent = parent.copy()
child = {
"py_modules": [],
"working_dir": "s3://path/test1.zip",
"env_vars": {"test": "val", "trial": "val2"},
"pip": ["numpy"],
}
original_child = child.copy()
merged = override_runtime_envs_except_env_vars(parent, child)
assert original_parent == parent
assert original_child == child
assert merged == {
"py_modules": [],
"working_dir": "s3://path/test1.zip",
"env_vars": {"test": "val", "trial": "val2"},
"pip": ["numpy"],
"excludes": ["my_file.txt"],
}
def test_merge_deep_copy(self):
"""Check that the env values are actually deep-copied."""
parent_env_vars = {"parent": "pval"}
child_env_vars = {"child": "cval"}
parent = {"env_vars": parent_env_vars}
child = {"env_vars": child_env_vars}
original_parent = parent.copy()
original_child = child.copy()
merged = override_runtime_envs_except_env_vars(parent, child)
assert merged["env_vars"] == {"parent": "pval", "child": "cval"}
assert original_parent == parent
assert original_child == child
def test_merge_empty_env_vars(self):
env_vars = {"test": "val", "trial": "val2"}
non_empty = {"env_vars": {"test": "val", "trial": "val2"}}
empty = {}
assert (
env_vars
== override_runtime_envs_except_env_vars(non_empty, empty)["env_vars"]
)
assert (
env_vars
== override_runtime_envs_except_env_vars(empty, non_empty)["env_vars"]
)
assert {} == override_runtime_envs_except_env_vars(empty, empty)["env_vars"]
def test_merge_env_vars(self):
parent = {
"py_modules": ["http://test.com/test0.zip", "s3://path/test1.zip"],
"working_dir": "gs://path/test2.zip",
"env_vars": {"parent": "pval", "override": "old"},
"pip": ["pandas", "numpy"],
"excludes": ["my_file.txt"],
}
child = {
"py_modules": [],
"working_dir": "s3://path/test1.zip",
"env_vars": {"child": "cval", "override": "new"},
"pip": ["numpy"],
}
merged = override_runtime_envs_except_env_vars(parent, child)
assert merged == {
"py_modules": [],
"working_dir": "s3://path/test1.zip",
"env_vars": {"parent": "pval", "child": "cval", "override": "new"},
"pip": ["numpy"],
"excludes": ["my_file.txt"],
}
def test_inheritance_regression(self):
"""Check if the general Ray runtime_env inheritance behavior matches.
override_runtime_envs_except_env_vars should match the general Ray
runtime_env inheritance behavior. This test checks if that behavior
has changed, which would indicate a regression in
override_runtime_envs_except_env_vars. If the runtime_env inheritance
behavior changes, override_runtime_envs_except_env_vars should also
change to match.
"""
with ray.init(
runtime_env={
"py_modules": [TEST_DAG_PINNED_URI],
"env_vars": {"var1": "hello"},
}
):
@ray.remote
def check_module():
# Check that Ray job's py_module loaded correctly
from conditional_dag import serve_dag # noqa: F401
return os.getenv("var1")
assert ray.get(check_module.remote()) == "hello"
@ray.remote(
runtime_env={
"py_modules": [TEST_DEPLOY_GROUP_PINNED_URI],
"env_vars": {"var2": "world"},
}
)
def test_task():
with pytest.raises(ImportError):
# Check that Ray job's py_module was overwritten
from conditional_dag import serve_dag # noqa: F401
from test_env.shallow_import import ShallowClass
if ShallowClass()() == "Hello shallow world!":
return os.getenv("var1") + " " + os.getenv("var2")
assert ray.get(test_task.remote()) == "hello world"
| TestOverrideRuntimeEnvsExceptEnvVars |
python | pennersr__django-allauth | allauth/headless/mfa/response.py | {
"start": 2315,
"end": 2524
} | class ____(APIResponse):
def __init__(self, request, authenticator, meta=None):
data = _authenticator_data(authenticator)
super().__init__(request, data=data, meta=meta)
| AuthenticatorResponse |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/time.py | {
"start": 8733,
"end": 18503
} | class ____(TimeDTypeBase[np.dtypes.TimeDelta64DType, np.timedelta64], HasEndianness):
"""
A Zarr data type for arrays containing NumPy TimeDelta64 data.
Wraps the ``np.dtypesTimeDelta64DType`` data type. Scalars for this data type
are instances of `np.timedelta64`.
Attributes
----------
dtype_cls : Type[np.dtypesTimeDelta64DType]
The NumPy dtype class for this data type.
scale_factor : int
The scale factor for this data type.
unit : DateTimeUnit
The unit for this data type.
References
----------
The Zarr V2 representation of this data type is defined in the Zarr V2
[specification document](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding).
The Zarr V3 representation of this data type is defined in the ``numpy.timedelta64``
[specification document](https://github.com/zarr-developers/zarr-extensions/tree/main/data-types/numpy.timedelta64)
"""
# mypy infers the type of np.dtypes.TimeDelta64DType to be
# "Callable[[Literal['Y', 'M', 'W', 'D'] | Literal['h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as']], Never]"
dtype_cls = np.dtypes.TimeDelta64DType # type: ignore[assignment]
unit: DateTimeUnit = "generic"
scale_factor: int = 1
_zarr_v3_name: ClassVar[Literal["numpy.timedelta64"]] = "numpy.timedelta64"
_zarr_v2_names: ClassVar[tuple[Literal[">m8"], Literal["<m8"]]] = (">m8", "<m8")
_numpy_name: ClassVar[Literal["timedelta64"]] = "timedelta64"
@classmethod
def _check_json_v2(cls, data: DTypeJSON) -> TypeGuard[TimeDelta64JSON_V2]:
"""
Validate that the provided JSON input accurately represents a NumPy timedelta64 data type,
which could be in the form of strings like "<m8" or ">m8[10s]". This method serves as a type
guard, helping to refine the type of unknown JSON input by confirming its adherence to the
expected format for NumPy timedelta64 data types.
The JSON input should contain a "name" key with a value that matches the expected string
pattern for NumPy timedelta64 data types. The pattern includes an optional unit enclosed
within square brackets, following the base type identifier.
Returns
-------
bool
True if the JSON input is a valid representation of this class,
otherwise False.
"""
if not check_dtype_spec_v2(data):
return False
name = data["name"]
# match <m[ns], >m[M], etc
# consider making this a standalone function
if not isinstance(name, str):
return False
if not name.startswith(cls._zarr_v2_names):
return False
if len(name) == 3:
# no unit, and
# we already checked that this string is either <m8 or >m8
return True
else:
return name[4:-1].endswith(DATETIME_UNIT) and name[-1] == "]"
@classmethod
def _check_json_v3(cls, data: DTypeJSON) -> TypeGuard[DateTime64JSON_V3]:
"""
Check that the input is a valid JSON representation of this class in Zarr V3.
Returns
-------
TypeGuard[DateTime64JSON_V3]
True if the JSON input is a valid representation of this class,
otherwise False.
"""
return (
isinstance(data, dict)
and set(data.keys()) == {"name", "configuration"}
and data["name"] == cls._zarr_v3_name
and isinstance(data["configuration"], dict)
and set(data["configuration"].keys()) == {"unit", "scale_factor"}
)
@classmethod
def _from_json_v2(cls, data: DTypeJSON) -> Self:
"""
Create a TimeDelta64 from a Zarr V2-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
TimeDelta64
An instance of TimeDelta64.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class.
"""
if cls._check_json_v2(data):
name = data["name"]
return cls.from_native_dtype(np.dtype(name))
msg = (
f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected a string "
f"representation of an instance of {cls.dtype_cls}"
)
raise DataTypeValidationError(msg)
@classmethod
def _from_json_v3(cls, data: DTypeJSON) -> Self:
"""
Create a TimeDelta64 from a Zarr V3-flavored JSON.
The JSON representation of a TimeDelta64 in Zarr V3 is a dict with a 'name' key
with the value 'numpy.timedelta64', and a 'configuration' key with a value of a dict
with a 'unit' key and a 'scale_factor' key.
For example:
```json
{
"name": "numpy.timedelta64",
"configuration": {
"unit": "generic",
"scale_factor": 1
}
}
```
"""
if cls._check_json_v3(data):
unit = data["configuration"]["unit"]
scale_factor = data["configuration"]["scale_factor"]
return cls(unit=unit, scale_factor=scale_factor)
msg = (
f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected a dict "
f"with a 'name' key with the value 'numpy.timedelta64', "
"and a 'configuration' key with a value of a dict with a 'unit' key and a "
"'scale_factor' key"
)
raise DataTypeValidationError(msg)
@overload
def to_json(self, zarr_format: Literal[2]) -> TimeDelta64JSON_V2: ...
@overload
def to_json(self, zarr_format: Literal[3]) -> TimeDelta64JSON_V3: ...
def to_json(self, zarr_format: ZarrFormat) -> TimeDelta64JSON_V2 | TimeDelta64JSON_V3:
"""
Serialize this data type to JSON.
Parameters
----------
zarr_format : ZarrFormat
The Zarr format version (2 or 3).
Returns
-------
TimeDelta64JSON_V2 | TimeDelta64JSON_V3
The JSON representation of the data type.
Raises
------
ValueError
If the zarr_format is not 2 or 3.
"""
if zarr_format == 2:
name = self.to_native_dtype().str
return {"name": name, "object_codec_id": None}
elif zarr_format == 3:
return {
"name": self._zarr_v3_name,
"configuration": {"unit": self.unit, "scale_factor": self.scale_factor},
}
raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}") # pragma: no cover
def _check_scalar(self, data: object) -> TypeGuard[TimeDeltaLike]:
"""
Check if the input is a scalar of this data type.
Parameters
----------
data : object
The object to check.
Returns
-------
TypeGuard[TimeDeltaLike]
True if the input is a scalar of this data type, False otherwise.
"""
if data is None:
return True
return isinstance(data, str | int | bytes | np.timedelta64 | timedelta)
def _cast_scalar_unchecked(self, data: TimeDeltaLike) -> np.timedelta64:
"""
Cast the provided scalar input to a numpy timedelta64 without any type checking.
This method assumes that the input data is already a valid scalar of this data type,
and does not perform any validation or type checks. It directly casts the input
to a numpy timedelta64 scalar using the unit and scale factor defined in the class.
Parameters
----------
data : TimeDeltaLike
The scalar input data to cast.
Returns
-------
numpy.timedelta64
The input data cast as a numpy timedelta64 scalar.
"""
return self.to_native_dtype().type(data, f"{self.scale_factor}{self.unit}")
def cast_scalar(self, data: object) -> np.timedelta64:
"""
Cast the input to a numpy timedelta64 scalar. If the input is not a scalar of this data type,
raise a TypeError.
"""
if self._check_scalar(data):
return self._cast_scalar_unchecked(data)
msg = (
f"Cannot convert object {data!r} with type {type(data)} to a scalar compatible with the "
f"data type {self}."
)
raise TypeError(msg)
def default_scalar(self) -> np.timedelta64:
"""
Return a default scalar of this data type.
This method provides a default value for the timedelta64 scalar, which is
a 'Not-a-Time' (NaT) value.
"""
return np.timedelta64("NaT")
def from_json_scalar(self, data: JSON, *, zarr_format: ZarrFormat) -> np.timedelta64:
"""
Create a scalar of this data type from JSON input.
Parameters
----------
data : JSON
The JSON representation of the scalar value.
zarr_format : int
The zarr format to use for the JSON representation.
Returns
-------
numpy.timedelta64
The scalar value of this data type.
Raises
------
TypeError
If the input JSON is not a valid representation of a scalar for this data type.
"""
if check_json_time(data):
return self.to_native_dtype().type(data, f"{self.scale_factor}{self.unit}")
raise TypeError(f"Invalid type: {data}. Expected an integer.") # pragma: no cover
@dataclass(frozen=True, kw_only=True, slots=True)
| TimeDelta64 |
python | redis__redis-py | redis/client.py | {
"start": 50767,
"end": 52369
} | class ____(threading.Thread):
def __init__(
self,
pubsub,
sleep_time: float,
daemon: bool = False,
exception_handler: Union[
Callable[[Exception, "PubSub", "PubSubWorkerThread"], None], None
] = None,
sharded_pubsub: bool = False,
):
super().__init__()
self.daemon = daemon
self.pubsub = pubsub
self.sleep_time = sleep_time
self.exception_handler = exception_handler
self.sharded_pubsub = sharded_pubsub
self._running = threading.Event()
def run(self) -> None:
if self._running.is_set():
return
self._running.set()
pubsub = self.pubsub
sleep_time = self.sleep_time
while self._running.is_set():
try:
if not self.sharded_pubsub:
pubsub.get_message(
ignore_subscribe_messages=True, timeout=sleep_time
)
else:
pubsub.get_sharded_message(
ignore_subscribe_messages=True, timeout=sleep_time
)
except BaseException as e:
if self.exception_handler is None:
raise
self.exception_handler(e, pubsub, self)
pubsub.close()
def stop(self) -> None:
# trip the flag so the run loop exits. the run loop will
# close the pubsub connection, which disconnects the socket
# and returns the connection to the pool.
self._running.clear()
| PubSubWorkerThread |
python | getsentry__sentry | src/sentry/integrations/example/integration.py | {
"start": 8546,
"end": 8699
} | class ____(ExampleIntegrationProvider):
key = "aliased"
integration_key = "example"
name = "Integration Key Example"
| AliasedIntegrationProvider |
python | django__django | django/views/generic/dates.py | {
"start": 18384,
"end": 18564
} | class ____(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""List of objects published in a given week."""
template_name_suffix = "_archive_week"
| WeekArchiveView |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 120421,
"end": 122446
} | class ____:
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
for dt, sz in [(np.float32, 15), (np.float64, 7)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
for i in range(inp.size):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: f'{inp!r}\n{msg}'
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"invalid value encountered in reduce",
RuntimeWarning)
assert_(np.isnan(inp.max()), msg=emsg)
assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
inp[i] = -1e10
assert_equal(inp.min(), -1e10, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
def test_reduce_reorder(self):
# gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
# and put it before the call to an intrinsic function that causes
# invalid status to be set. Also make sure warnings are not emitted
for n in (2, 4, 8, 16, 32):
for dt in (np.float32, np.float16, np.complex64):
for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
assert_equal(np.min(r), np.nan)
def test_minimize_no_warns(self):
a = np.minimum(np.nan, 1)
assert_equal(a, np.nan)
| TestMinMax |
python | openai__openai-python | src/openai/types/eval_create_params.py | {
"start": 5307,
"end": 6087
} | class ____(TypedDict, total=False):
input: Required[Iterable[TestingCriterionLabelModelInput]]
"""A list of chat messages forming the prompt or context.
May include variable references to the `item` namespace, ie {{item.name}}.
"""
labels: Required[SequenceNotStr[str]]
"""The labels to classify to each item in the evaluation."""
model: Required[str]
"""The model to use for the evaluation. Must support structured outputs."""
name: Required[str]
"""The name of the grader."""
passing_labels: Required[SequenceNotStr[str]]
"""The labels that indicate a passing result. Must be a subset of labels."""
type: Required[Literal["label_model"]]
"""The object type, which is always `label_model`."""
| TestingCriterionLabelModel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.