language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 223691,
"end": 234444
} | class ____(ClassDefNode):
# A Python class definition.
#
# name EncodedString Name of the class
# doc string or None The class docstring
# body StatNode Attribute definition code
# entry Symtab.Entry
# scope PyClassScope
# decorators [DecoratorNode] list of decorators or None
# bases ExprNode Expression that evaluates to a tuple of base classes
#
# The following subnodes are constructed internally:
#
# doc_node NameNode '__doc__' name that is made available to the class body
# dict DictNode Class dictionary or Py3 namespace
# classobj ClassNode Class object
# target NameNode Variable to assign class object to
# orig_bases None or ExprNode "bases" before transformation by PEP560 __mro_entries__,
# used to create the __orig_bases__ attribute
child_attrs = ["doc_node", "body", "dict", "metaclass", "mkw", "bases", "class_result",
"target", "class_cell", "decorators", "orig_bases"]
decorators = None
class_result = None
is_py3_style_class = False # Python3 style class (kwargs)
metaclass = None
mkw = None
doc_node = None
orig_bases = None
def __init__(self, pos, name, bases, doc, body, decorators=None,
keyword_args=None, force_py3_semantics=False):
StatNode.__init__(self, pos)
self.name = name
self.doc = doc
self.body = body
self.decorators = decorators
self.bases = bases
from . import ExprNodes
if self.doc and Options.docstrings:
doc = embed_position(self.pos, self.doc)
doc_node = ExprNodes.UnicodeNode(pos, value=doc)
self.doc_node = ExprNodes.NameNode(name=EncodedString('__doc__'), type=py_object_type, pos=pos)
else:
doc_node = None
allow_py2_metaclass = not force_py3_semantics
if keyword_args:
allow_py2_metaclass = False
self.is_py3_style_class = True
if keyword_args.is_dict_literal:
if keyword_args.key_value_pairs:
for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
if item.key.value == 'metaclass':
if self.metaclass is not None:
error(item.pos, "keyword argument 'metaclass' passed multiple times")
# special case: we already know the metaclass,
# so we don't need to do the "build kwargs,
# find metaclass" dance at runtime
self.metaclass = item.value
del keyword_args.key_value_pairs[i]
self.mkw = keyword_args
else:
assert self.metaclass is not None
else:
# MergedDictNode
self.mkw = ExprNodes.ProxyNode(keyword_args)
if force_py3_semantics or self.bases or self.mkw or self.metaclass:
if self.metaclass is None:
if keyword_args and not keyword_args.is_dict_literal:
# **kwargs may contain 'metaclass' arg
mkdict = self.mkw
else:
mkdict = None
if (not mkdict and
self.bases.is_sequence_constructor and
not self.bases.args):
pass # no base classes => no inherited metaclass
else:
self.metaclass = ExprNodes.PyClassMetaclassNode(
pos, class_def_node=self)
needs_metaclass_calculation = False
else:
needs_metaclass_calculation = True
self.dict = ExprNodes.PyClassNamespaceNode(
pos, name=name, doc=doc_node, class_def_node=self)
self.classobj = ExprNodes.Py3ClassNode(
pos, name=name, class_def_node=self, doc=doc_node,
calculate_metaclass=needs_metaclass_calculation,
allow_py2_metaclass=allow_py2_metaclass,
force_type=force_py3_semantics,
)
else:
# no bases, no metaclass => old style class creation
self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
self.classobj = ExprNodes.ClassNode(
pos, name=name, class_def_node=self, doc=doc_node)
self.target = ExprNodes.NameNode(pos, name=name)
self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
"""
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
from . import ExprNodes
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
class_name=self.name,
bases=self.bases or ExprNodes.TupleNode(self.pos, args=[]),
decorators=self.decorators,
body=self.body,
in_pxd=False,
doc=self.doc)
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv)
return cenv
def analyse_declarations(self, env):
unwrapped_class_result = class_result = self.classobj
if self.decorators:
from .ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
class_result = SimpleCallNode(
decorator.pos,
function=decorator.decorator,
args=[class_result])
self.decorators = None
self.class_result = class_result
if self.bases:
self.bases.analyse_declarations(env)
if self.mkw:
self.mkw.analyse_declarations(env)
self.class_result.analyse_declarations(env)
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
if self.doc_node:
self.doc_node.analyse_target_declaration(cenv)
self.body.analyse_declarations(cenv)
unwrapped_class_result.analyse_annotations(cenv)
update_bases_functype = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("bases", PyrexTypes.py_object_type, None)
])
def analyse_expressions(self, env):
if self.bases and not (self.bases.is_sequence_constructor and len(self.bases.args) == 0):
from .ExprNodes import PythonCapiCallNode, CloneNode
# handle the Python 3.7 __mro_entries__ transformation
orig_bases = self.bases.analyse_expressions(env)
self.bases = PythonCapiCallNode(orig_bases.pos,
function_name="__Pyx_PEP560_update_bases",
func_type=self.update_bases_functype,
utility_code=UtilityCode.load_cached('Py3UpdateBases', 'ObjectHandling.c'),
args=[CloneNode(orig_bases)])
self.orig_bases = orig_bases
if self.bases:
self.bases = self.bases.analyse_expressions(env)
if self.mkw:
self.mkw = self.mkw.analyse_expressions(env)
if self.metaclass:
self.metaclass = self.metaclass.analyse_expressions(env)
self.dict = self.dict.analyse_expressions(env)
self.class_result = self.class_result.analyse_expressions(env)
cenv = self.scope
self.body = self.body.analyse_expressions(cenv)
self.target = self.target.analyse_target_expression(env, self.classobj)
self.class_cell = self.class_cell.analyse_expressions(cenv)
return self
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.pyclass_stack.append(self)
cenv = self.scope
if self.orig_bases:
self.orig_bases.generate_evaluation_code(code)
if self.bases:
self.bases.generate_evaluation_code(code)
if self.mkw:
self.mkw.generate_evaluation_code(code)
if self.metaclass:
self.metaclass.generate_evaluation_code(code)
self.dict.generate_evaluation_code(code)
if self.orig_bases:
# update __orig_bases__ if needed
code.putln("if (%s != %s) {" % (self.bases.result(), self.orig_bases.result()))
code.putln(
code.error_goto_if_neg('PyDict_SetItemString(%s, "__orig_bases__", %s)' % (
self.dict.result(), self.orig_bases.result()),
self.pos
))
code.putln("}")
self.orig_bases.generate_disposal_code(code)
self.orig_bases.free_temps(code)
cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
class_cell = self.class_cell
if class_cell is not None and not class_cell.is_active:
class_cell = None
if class_cell is not None:
class_cell.generate_evaluation_code(code)
self.body.generate_execution_code(code)
self.class_result.generate_evaluation_code(code)
if class_cell is not None:
class_cell.generate_injection_code(
code, self.class_result.result())
if class_cell is not None:
class_cell.generate_disposal_code(code)
class_cell.free_temps(code)
cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
self.target.generate_assignment_code(self.class_result, code)
self.dict.generate_disposal_code(code)
self.dict.free_temps(code)
if self.metaclass:
self.metaclass.generate_disposal_code(code)
self.metaclass.free_temps(code)
if self.mkw:
self.mkw.generate_disposal_code(code)
self.mkw.free_temps(code)
if self.bases:
self.bases.generate_disposal_code(code)
self.bases.free_temps(code)
code.pyclass_stack.pop()
| PyClassDefNode |
python | sympy__sympy | sympy/stats/frv.py | {
"start": 1693,
"end": 2359
} | class ____(RandomDomain):
"""
A domain with discrete finite support
Represented using a FiniteSet.
"""
is_Finite = True
@property
def symbols(self):
return FiniteSet(sym for sym, val in self.elements)
@property
def elements(self):
return self.args[0]
@property
def dict(self):
return FiniteSet(*[Dict(dict(el)) for el in self.elements])
def __contains__(self, other):
return other in self.elements
def __iter__(self):
return self.elements.__iter__()
def as_boolean(self):
return Or(*[And(*[Eq(sym, val) for sym, val in item]) for item in self])
| FiniteDomain |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 650058,
"end": 650533
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node", "permission")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="node")
permission = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryPermission), graphql_name="permission"
)
| TeamRepositoryEdge |
python | pennersr__django-allauth | allauth/headless/mfa/response.py | {
"start": 1612,
"end": 1910
} | class ____(APIResponse):
def __init__(self, request, secret, totp_url):
super().__init__(
request,
meta={
"secret": secret,
"totp_url": totp_url,
},
status=HTTPStatus.NOT_FOUND,
)
| TOTPNotFoundResponse |
python | catalyst-team__catalyst | catalyst/contrib/datasets/imagewang.py | {
"start": 469,
"end": 934
} | class ____(ImageClassificationDataset):
"""
`Imagewang <https://github.com/fastai/imagenette#image%E7%BD%91>`_ Dataset
with images resized so that the shortest size is 160 px.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewang-160"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewang-160.tgz",
"1dc388d37d1dc52836c06749e14e37bc",
)
]
| Imagewang160 |
python | python-attrs__attrs | tests/dataclass_transform_example.py | {
"start": 613,
"end": 795
} | class ____:
_a: int = attrs.field(alias="_a")
af = AliasedField(42)
reveal_type(af.__init__) # noqa: F821
# unsafe_hash is accepted
@attrs.define(unsafe_hash=True)
| AliasedField |
python | celery__celery | t/unit/backends/test_rpc.py | {
"start": 546,
"end": 3692
} | class ____:
def setup_method(self):
self.b = RPCBackend(app=self.app)
def test_oid(self):
oid = self.b.oid
oid2 = self.b.oid
assert uuid.UUID(oid)
assert oid == oid2
assert oid == self.app.thread_oid
def test_oid_threads(self):
# Verify that two RPC backends executed in different threads
# has different oid.
oid = self.b.oid
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(lambda: RPCBackend(app=self.app).oid)
thread_oid = future.result()
assert uuid.UUID(oid)
assert uuid.UUID(thread_oid)
assert oid == self.app.thread_oid
assert thread_oid != oid
def test_interface(self):
self.b.on_reply_declare('task_id')
def test_ensure_chords_allowed(self):
with pytest.raises(NotImplementedError):
self.b.ensure_chords_allowed()
def test_apply_chord(self):
with pytest.raises(NotImplementedError):
self.b.apply_chord(self.app.GroupResult(), None)
@pytest.mark.celery(result_backend='rpc')
def test_chord_raises_error(self):
with pytest.raises(NotImplementedError):
chord(self.add.s(i, i) for i in range(10))(self.add.s([2]))
@pytest.mark.celery(result_backend='rpc')
def test_chain_with_chord_raises_error(self):
with pytest.raises(NotImplementedError):
(self.add.s(2, 2) |
group(self.add.s(2, 2),
self.add.s(5, 6)) | self.add.s()).delay()
def test_destination_for(self):
req = Mock(name='request')
req.reply_to = 'reply_to'
req.correlation_id = 'corid'
assert self.b.destination_for('task_id', req) == ('reply_to', 'corid')
task = Mock()
_task_stack.push(task)
try:
task.request.reply_to = 'reply_to'
task.request.correlation_id = 'corid'
assert self.b.destination_for('task_id', None) == (
'reply_to', 'corid',
)
finally:
_task_stack.pop()
with pytest.raises(RuntimeError):
self.b.destination_for('task_id', None)
def test_binding(self):
queue = self.b.binding
assert queue.name == self.b.oid
assert queue.exchange == self.b.exchange
assert queue.routing_key == self.b.oid
assert not queue.durable
assert queue.auto_delete
def test_create_binding(self):
assert self.b._create_binding('id') == self.b.binding
def test_on_task_call(self):
with patch('celery.backends.rpc.maybe_declare') as md:
with self.app.amqp.producer_pool.acquire() as prod:
self.b.on_task_call(prod, 'task_id'),
md.assert_called_with(
self.b.binding(prod.channel),
retry=True,
)
def test_create_exchange(self):
ex = self.b._create_exchange('name')
assert isinstance(ex, self.b.Exchange)
assert ex.name == ''
| test_RPCBackend |
python | pyca__cryptography | tests/hazmat/primitives/test_serialization.py | {
"start": 52143,
"end": 52618
} | class ____:
def test_non_bytes_password(self):
with pytest.raises(ValueError):
BestAvailableEncryption(object()) # type:ignore[arg-type]
def test_encryption_with_zero_length_password(self):
with pytest.raises(ValueError):
BestAvailableEncryption(b"")
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
| TestKeySerializationEncryptionTypes |
python | pytorch__pytorch | torch/export/graph_signature.py | {
"start": 1010,
"end": 1141
} | class ____:
name: str
class_fqn: str
fake_val: Optional[FakeScriptObject] = None
@dataclasses.dataclass
| CustomObjArgument |
python | pytorch__pytorch | torch/_export/passes/add_runtime_assertions_for_constraints_pass.py | {
"start": 518,
"end": 1129
} | class ____(NamedTuple):
input_name: str
dim: int
def _convert_to_int(val):
# Convert simple sympy Integers into concrete int
if val in (sympy.oo, int_oo):
return math.inf
if val in (-sympy.oo, -int_oo):
return -math.inf
if isinstance(val, sympy.Integer):
return int(val)
raise RuntimeError("Export constraints cannot be non-integer expressions")
def _convert_range_to_int(range: ValueRanges):
assert isinstance(range, ValueRanges)
min_val = _convert_to_int(range.lower)
max_val = _convert_to_int(range.upper)
return min_val, max_val
| InputDim |
python | doocs__leetcode | solution/1400-1499/1410.HTML Entity Parser/Solution.py | {
"start": 0,
"end": 607
} | class ____:
def entityParser(self, text: str) -> str:
        d = {
            '&quot;': '"',
            '&apos;': "'",
            '&amp;': "&",
            "&gt;": '>',
            "&lt;": '<',
            "&frasl;": '/',
        }
i, n = 0, len(text)
ans = []
while i < n:
for l in range(1, 8):
j = i + l
if text[i:j] in d:
ans.append(d[text[i:j]])
i = j
break
else:
ans.append(text[i])
i += 1
return ''.join(ans)
| Solution |
python | ZoranPandovski__al-go-rithms | search/Traversal/Pre Order Traversal Binary Tree/Python/koffy.py | {
"start": 190,
"end": 475
} | class ____:
def preorderTraversal(self, root: TreeNode):
result = []
if root:
result.append(root.val)
result += self.preorderTraversal(root.left)
result += self.preorderTraversal(root.right)
return result | Solution |
python | numpy__numpy | numpy/polynomial/tests/test_printing.py | {
"start": 15003,
"end": 18946
} | class ____:
"""Test the latex repr used by Jupyter"""
@staticmethod
def as_latex(obj):
# right now we ignore the formatting of scalars in our tests, since
# it makes them too verbose. Ideally, the formatting of scalars will
# be fixed such that tests below continue to pass
obj._repr_latex_scalar = lambda x, parens=False: str(x)
try:
return obj._repr_latex_()
finally:
del obj._repr_latex_scalar
def test_simple_polynomial(self):
# default input
p = poly.Polynomial([1, 2, 3])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$')
# translated input
p = poly.Polynomial([1, 2, 3], domain=[-2, 0])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # noqa: E501
# scaled input
p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$')
# affine input
p = poly.Polynomial([1, 2, 3], domain=[-1, 0])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') # noqa: E501
def test_basis_func(self):
p = poly.Chebyshev([1, 2, 3])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$')
# affine input - check no surplus parens are added
p = poly.Chebyshev([1, 2, 3], domain=[-1, 0])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') # noqa: E501
def test_multichar_basis_func(self):
p = poly.HermiteE([1, 2, 3])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$')
def test_symbol_basic(self):
# default input
p = poly.Polynomial([1, 2, 3], symbol='z')
assert_equal(self.as_latex(p),
r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$')
# translated input
p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z')
assert_equal(
self.as_latex(p),
(
r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,'
r'\left(1.0 + z\right)^{2}$'
),
)
# scaled input
p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z')
assert_equal(
self.as_latex(p),
(
r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,'
r'\left(2.0z\right)^{2}$'
),
)
# affine input
p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z')
assert_equal(
self.as_latex(p),
(
r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,'
r'\left(1.0 + 2.0z\right)^{2}$'
),
)
def test_numeric_object_coefficients(self):
coefs = array([Fraction(1, 2), Fraction(1)])
p = poly.Polynomial(coefs)
assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$')
SWITCH_TO_EXP = (
'1.0 + (1.0e-01) x + (1.0e-02) x**2',
'1.2 + (1.2e-01) x + (1.2e-02) x**2',
'1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3',
'1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3',
'1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4',
'1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + '
'(1.23457e-04) x**4',
'1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + '
'(1.234568e-04) x**4 + (1.234568e-05) x**5',
'1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + '
'(1.2345679e-04) x**4 + (1.2345679e-05) x**5')
| TestLatexRepr |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/aioodbc.py | {
"start": 1809,
"end": 2021
} | class ____(aiodbcConnector, MSDialect_pyodbc):
driver = "aioodbc"
supports_statement_cache = True
execution_ctx_cls = MSExecutionContext_aioodbc
dialect = MSDialectAsync_aioodbc
| MSDialectAsync_aioodbc |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 2494,
"end": 2568
} | class ____(Protocol):
def m(self, x: Self) -> None: ...
| Proto_ContraSelf |
python | huggingface__transformers | tests/models/siglip/test_image_processing_siglip.py | {
"start": 1011,
"end": 3040
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
# Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest with CLIP->Siglip
| SiglipImageProcessingTester |
python | huggingface__transformers | src/transformers/models/big_bird/modeling_big_bird.py | {
"start": 100725,
"end": 105486
} | class ____(BigBirdPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BigBirdModel(config)
self.classifier = BigBirdClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[SequenceClassifierOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, BigBirdForSequenceClassification
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("l-yohai/bigbird-roberta-base-mnli")
>>> model = BigBirdForSequenceClassification.from_pretrained("l-yohai/bigbird-roberta-base-mnli")
>>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT
>>> LONG_ARTICLE = squad_ds[81514]["context"]
>>> inputs = tokenizer(LONG_ARTICLE, return_tensors="pt")
>>> # long input article
>>> list(inputs["input_ids"].shape)
[1, 919]
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax().item()
>>> model.config.id2label[predicted_class_id]
'LABEL_0'
```
```python
>>> num_labels = len(model.config.id2label)
>>> model = BigBirdForSequenceClassification.from_pretrained(
... "l-yohai/bigbird-roberta-base-mnli", num_labels=num_labels
... )
>>> labels = torch.tensor(1)
>>> loss = model(**inputs, labels=labels).loss
>>> round(loss.item(), 2)
1.13
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| BigBirdForSequenceClassification |
python | psf__requests | src/requests/auth.py | {
"start": 2220,
"end": 2851
} | class ____(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all(
[
self.username == getattr(other, "username", None),
self.password == getattr(other, "password", None),
]
)
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
return r
| HTTPBasicAuth |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 41137,
"end": 63728
} | class ____(NonStrictDataModel):
"""
:param id: Task id
:type id: str
:param name: Task Name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company ID
:type company: str
:param type: Type of task. Values: 'training', 'testing'
:type type: TaskTypeEnum
:param status:
:type status: TaskStatusEnum
:param comment: Free text comment
:type comment: str
:param created: Task creation time (UTC)
:type created: datetime.datetime
:param started: Task start time (UTC)
:type started: datetime.datetime
:param completed: Task end time (UTC)
:type completed: datetime.datetime
:param parent: Parent task id
:type parent: str
:param project: Project ID of the project to which this task is assigned
:type project: str
:param output: Task output params
:type output: Output
:param execution: Task execution params
:type execution: Execution
:param script: Script info
:type script: Script
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param status_changed: Last status change time
:type status_changed: datetime.datetime
:param status_message: free text string representing info about the status
:type status_message: str
:param status_reason: Reason for last status change
:type status_reason: str
:param published: Last status change time
:type published: datetime.datetime
:param last_worker: ID of last worker that handled the task
:type last_worker: str
:param last_worker_report: Last time a worker reported while working on this
task
:type last_worker_report: datetime.datetime
:param last_update: Last time this task was created, updated, changed or events
for this task were reported
:type last_update: datetime.datetime
:param last_iteration: Last iteration reported for this task
:type last_iteration: int
:param last_metrics: Last metric variants (hash to events), one for each metric
hash
:type last_metrics: dict
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
"""
_schema = {
"properties": {
"comment": {"description": "Free text comment", "type": ["string", "null"]},
"company": {"description": "Company ID", "type": ["string", "null"]},
"completed": {
"description": "Task end time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": ["object", "null"],
},
"created": {
"description": "Task creation time (UTC) ",
"format": "date-time",
"type": ["string", "null"],
},
"execution": {
"description": "Task execution params",
"oneOf": [{"$ref": "#/definitions/execution"}, {"type": "null"}],
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": ["object", "null"],
},
"id": {"description": "Task id", "type": ["string", "null"]},
"last_iteration": {
"description": "Last iteration reported for this task",
"type": ["integer", "null"],
},
"last_metrics": {
"additionalProperties": {"$ref": "#/definitions/last_metrics_variants"},
"description": "Last metric variants (hash to events), one for each metric hash",
"type": ["object", "null"],
},
"last_update": {
"description": "Last time this task was created, updated, changed or events for this task were reported",
"format": "date-time",
"type": ["string", "null"],
},
"last_worker": {
"description": "ID of last worker that handled the task",
"type": ["string", "null"],
},
"last_worker_report": {
"description": "Last time a worker reported while working on this task",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Task Name", "type": ["string", "null"]},
"output": {
"description": "Task output params",
"oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
},
"parent": {"description": "Parent task id", "type": ["string", "null"]},
"project": {
"description": "Project ID of the project to which this task is assigned",
"type": ["string", "null"],
},
"published": {
"description": "Last status change time",
"format": "date-time",
"type": ["string", "null"],
},
"script": {
"description": "Script info",
"oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
},
"started": {
"description": "Task start time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"status": {
"description": "",
"oneOf": [{"$ref": "#/definitions/task_status_enum"}, {"type": "null"}],
},
"status_changed": {
"description": "Last status change time",
"format": "date-time",
"type": ["string", "null"],
},
"status_message": {
"description": "free text string representing info about the status",
"type": ["string", "null"],
},
"status_reason": {
"description": "Reason for last status change",
"type": ["string", "null"],
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": ["array", "null"],
},
"type": {
"description": "Type of task. Values: 'training', 'testing'",
"oneOf": [{"$ref": "#/definitions/task_type_enum"}, {"type": "null"}],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
type: Any = None,
status: Any = None,
comment: Optional[str] = None,
created: Optional[str] = None,
started: Optional[str] = None,
completed: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
output: Any = None,
execution: Any = None,
script: Any = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
status_changed: Optional[str] = None,
status_message: Optional[str] = None,
status_reason: Optional[str] = None,
published: Optional[str] = None,
last_worker: Optional[str] = None,
last_worker_report: Optional[str] = None,
last_update: Optional[str] = None,
last_iteration: Optional[int] = None,
last_metrics: Optional[dict] = None,
hyperparams: Optional[dict] = None,
configuration: Optional[dict] = None,
**kwargs: Any
) -> None:
super(Task, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.type = type
self.status = status
self.comment = comment
self.created = created
self.started = started
self.completed = completed
self.parent = parent
self.project = project
self.output = output
self.execution = execution
self.script = script
self.tags = tags
self.system_tags = system_tags
self.status_changed = status_changed
self.status_message = status_message
self.status_reason = status_reason
self.published = published
self.last_worker = last_worker
self.last_worker_report = last_worker_report
self.last_update = last_update
self.last_iteration = last_iteration
self.last_metrics = last_metrics
self.hyperparams = hyperparams
self.configuration = configuration
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("type")
def type(self) -> Any:
return self._property_type
@type.setter
def type(self, value: Any) -> None:
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("status")
def status(self) -> Any:
return self._property_status
@status.setter
def status(self, value: Any) -> None:
if value is None:
self._property_status = None
return
if isinstance(value, six.string_types):
try:
value = TaskStatusEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "status", enum.Enum)
self._property_status = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("started")
def started(self) -> Optional[str]:
return self._property_started
@started.setter
def started(self, value: Optional[str]) -> None:
if value is None:
self._property_started = None
return
self.assert_isinstance(value, "started", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_started = value
@schema_property("completed")
def completed(self) -> Optional[str]:
return self._property_completed
@completed.setter
def completed(self, value: Optional[str]) -> None:
if value is None:
self._property_completed = None
return
self.assert_isinstance(value, "completed", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_completed = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("output")
def output(self) -> Any:
return self._property_output
@output.setter
def output(self, value: Any) -> None:
if value is None:
self._property_output = None
return
if isinstance(value, dict):
value = Output.from_dict(value)
else:
self.assert_isinstance(value, "output", Output)
self._property_output = value
@schema_property("execution")
def execution(self) -> Any:
return self._property_execution
@execution.setter
def execution(self, value: Any) -> None:
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("script")
def script(self) -> Any:
return self._property_script
@script.setter
def script(self, value: Any) -> None:
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("status_changed")
def status_changed(self) -> Optional[str]:
return self._property_status_changed
@status_changed.setter
def status_changed(self, value: Optional[str]) -> None:
if value is None:
self._property_status_changed = None
return
self.assert_isinstance(value, "status_changed", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_status_changed = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("published")
def published(self) -> Optional[str]:
return self._property_published
@published.setter
def published(self, value: Optional[str]) -> None:
if value is None:
self._property_published = None
return
self.assert_isinstance(value, "published", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_published = value
@schema_property("last_worker")
def last_worker(self) -> Optional[str]:
return self._property_last_worker
@last_worker.setter
def last_worker(self, value: Optional[str]) -> None:
if value is None:
self._property_last_worker = None
return
self.assert_isinstance(value, "last_worker", six.string_types)
self._property_last_worker = value
@schema_property("last_worker_report")
def last_worker_report(self) -> Optional[str]:
return self._property_last_worker_report
@last_worker_report.setter
def last_worker_report(self, value: Optional[str]) -> None:
if value is None:
self._property_last_worker_report = None
return
self.assert_isinstance(value, "last_worker_report", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_worker_report = value
@schema_property("last_update")
def last_update(self) -> Optional[str]:
return self._property_last_update
@last_update.setter
def last_update(self, value: Optional[str]) -> None:
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_update = value
@schema_property("last_iteration")
def last_iteration(self) -> Optional[int]:
return self._property_last_iteration
@last_iteration.setter
def last_iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_last_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "last_iteration", six.integer_types)
self._property_last_iteration = value
@schema_property("last_metrics")
def last_metrics(self) -> Optional[dict]:
return self._property_last_metrics
@last_metrics.setter
def last_metrics(self, value: Optional[dict]) -> None:
if value is None:
self._property_last_metrics = None
return
self.assert_isinstance(value, "last_metrics", (dict,))
self._property_last_metrics = value
@schema_property("hyperparams")
def hyperparams(self) -> Optional[dict]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: Optional[dict]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True)
self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True)
value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self) -> Optional[dict]:
return self._property_configuration
@configuration.setter
def configuration(self, value: Optional[dict]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_configuration = value
| Task |
python | sympy__sympy | sympy/matrices/expressions/matexpr.py | {
"start": 20833,
"end": 23055
} | class ____(MatrixExpr):
"""Symbolic representation of a Matrix object
Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and
can be included in Matrix Expressions
Examples
========
>>> from sympy import MatrixSymbol, Identity
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_commutative = False
is_symbol = True
_diff_wrt = True
def __new__(cls, name, n, m):
n, m = _sympify(n), _sympify(m)
cls._check_dim(m)
cls._check_dim(n)
if isinstance(name, str):
name = Str(name)
obj = Basic.__new__(cls, name, n, m)
return obj
@property
def shape(self):
return self.args[1], self.args[2]
@property
def name(self):
return self.args[0].name
def _entry(self, i, j, **kwargs):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return {self}
def _eval_simplify(self, **kwargs):
return self
def _eval_derivative(self, x):
# x is a scalar:
if self.free_symbols & x.free_symbols:
if isinstance(x, MatrixElement) and self == x.parent:
from .special import MatrixUnit
return MatrixUnit(self.shape[0], self.shape[1], x.i, x.j)
return None
else:
return ZeroMatrix(self.shape[0], self.shape[1])
def _eval_derivative_matrix_lines(self, x):
if self != x:
first = ZeroMatrix(x.shape[0], self.shape[0]) if self.shape[0] != 1 else S.Zero
second = ZeroMatrix(x.shape[1], self.shape[1]) if self.shape[1] != 1 else S.Zero
return [_LeftRightArgs(
[first, second],
)]
else:
first = Identity(self.shape[0]) if self.shape[0] != 1 else S.One
second = Identity(self.shape[1]) if self.shape[1] != 1 else S.One
return [_LeftRightArgs(
[first, second],
)]
def matrix_symbols(expr):
return [sym for sym in expr.free_symbols if sym.is_Matrix]
| MatrixSymbol |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 46264,
"end": 50920
} | class ____(BaseModel, extra=Extra.forbid):
"""
Serve metadata with system-level info and details on all applications deployed to
the Ray cluster.
This is the response JSON schema for v2 REST API `GET /api/serve/applications`.
"""
controller_info: ServeActorDetails = Field(
description="Details about the Serve controller actor."
)
proxy_location: Optional[ProxyLocation] = Field(
description=(
"Config for where to run proxies for ingress traffic to the cluster.\n"
'- "Disabled": disable the proxies entirely.\n'
'- "HeadOnly": run only one proxy on the head node.\n'
'- "EveryNode": run proxies on every node that has at least one replica.\n'
),
)
http_options: Optional[HTTPOptionsSchema] = Field(description="HTTP Proxy options.")
grpc_options: Optional[gRPCOptionsSchema] = Field(description="gRPC Proxy options.")
proxies: Dict[str, ProxyDetails] = Field(
description=(
"Mapping from node_id to details about the Proxy running on that node."
)
)
deploy_mode: ServeDeployMode = Field(
default=ServeDeployMode.MULTI_APP,
description=(
"[DEPRECATED]: single-app configs are removed, so this is always "
"MULTI_APP. This field will be removed in a future release."
),
)
applications: Dict[str, ApplicationDetails] = Field(
description="Details about all live applications running on the cluster."
)
target_capacity: Optional[float] = TARGET_CAPACITY_FIELD
target_groups: List[TargetGroup] = Field(
default_factory=list,
description=(
"List of target groups, each containing target info for a given route and "
"protocol."
),
)
@staticmethod
def get_empty_schema_dict() -> Dict:
"""Empty Serve instance details dictionary.
Represents no Serve instance running on the cluster.
"""
return {
"deploy_mode": "MULTI_APP",
"controller_info": {},
"proxies": {},
"applications": {},
"target_capacity": None,
}
def _get_status(self) -> ServeStatus:
return ServeStatus(
target_capacity=self.target_capacity,
proxies={node_id: proxy.status for node_id, proxy in self.proxies.items()},
applications={
app_name: ApplicationStatusOverview(
status=app.status,
message=app.message,
last_deployed_time_s=app.last_deployed_time_s,
deployments={
deployment_name: DeploymentStatusOverview(
status=deployment.status,
status_trigger=deployment.status_trigger,
replica_states=dict(
Counter([r.state.value for r in deployment.replicas])
),
message=deployment.message,
)
for deployment_name, deployment in app.deployments.items()
},
)
for app_name, app in self.applications.items()
},
)
def _get_user_facing_json_serializable_dict(
self, *args, **kwargs
) -> Dict[str, Any]:
"""Generates json serializable dictionary with user facing data."""
values = super().dict(*args, **kwargs)
# `serialized_policy_def` and internal router config fields are only used
# internally and should not be exposed to the REST api. This method iteratively
# removes them from each deployment config if exists.
for app_name, application in values["applications"].items():
for deployment_name, deployment in application["deployments"].items():
if "deployment_config" in deployment:
# Remove internal fields from request_router_config if it exists
if "request_router_config" in deployment["deployment_config"]:
deployment["deployment_config"]["request_router_config"].pop(
"_serialized_request_router_cls", None
)
if "autoscaling_config" in deployment["deployment_config"]:
deployment["deployment_config"]["autoscaling_config"].pop(
"_serialized_policy_def", None
)
return values
@PublicAPI(stability="alpha")
| ServeInstanceDetails |
python | sympy__sympy | sympy/integrals/heurisch.py | {
"start": 6832,
"end": 7968
} | class ____:
"""
Derivatives of Bessel functions of orders n and n-1
in terms of each other.
See the docstring of DiffCache.
"""
def __init__(self):
self.table = {}
self.n = Dummy('n')
self.z = Dummy('z')
self._create_table()
def _create_table(t):
table, n, z = t.table, t.n, t.z
for f in (besselj, bessely, hankel1, hankel2):
table[f] = (f(n-1, z) - n*f(n, z)/z,
(n-1)*f(n-1, z)/z - f(n, z))
f = besseli
table[f] = (f(n-1, z) - n*f(n, z)/z,
(n-1)*f(n-1, z)/z + f(n, z))
f = besselk
table[f] = (-f(n-1, z) - n*f(n, z)/z,
(n-1)*f(n-1, z)/z - f(n, z))
for f in (jn, yn):
table[f] = (f(n-1, z) - (n+1)*f(n, z)/z,
(n-1)*f(n-1, z)/z - f(n, z))
def diffs(t, f, n, z):
if f in t.table:
diff0, diff1 = t.table[f]
repl = [(t.n, n), (t.z, z)]
return (diff0.subs(repl), diff1.subs(repl))
def has(t, f):
return f in t.table
_bessel_table = None
| BesselTable |
python | modin-project__modin | modin/experimental/xgboost/utils.py | {
"start": 2494,
"end": 3478
} | class ____:
"""
Context to connect a worker to a rabit tracker.
Parameters
----------
actor_rank : int
Rank of actor, connected to this context.
args : list
List with environment variables for Rabit Tracker.
"""
def __init__(self, actor_rank, args):
self.args = args
self.args.append(("DMLC_TASK_ID=[modin.xgboost]:" + str(actor_rank)).encode())
def __enter__(self):
"""
Entry point of context.
Connects to Rabit Tracker.
"""
xgb.rabit.init(self.args)
LOGGER.info("-------------- rabit started ------------------")
def __exit__(self, *args):
"""
Exit point of context.
Disconnects from Rabit Tracker.
Parameters
----------
*args : iterable
Parameters for Exception capturing.
"""
xgb.rabit.finalize()
LOGGER.info("-------------- rabit finished ------------------")
| RabitContext |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 17010,
"end": 17475
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
| EsmOutput |
python | getsentry__sentry | src/sentry/grouping/enhancer/parser.py | {
"start": 1527,
"end": 5860
} | class ____(NodeVisitor[list[EnhancementRule]]):
visit_comment = visit_empty = lambda *a: None
unwrapped_exceptions = (InvalidEnhancerConfig,)
def visit_enhancements(
self, node: Node, children: list[EnhancementRule | None]
) -> list[EnhancementRule]:
rules = []
for child in children:
if not isinstance(child, str) and child is not None:
rules.append(child)
return rules
def visit_line(
self, node: Node, children: tuple[object, list[EnhancementRule | None], object]
) -> EnhancementRule | None:
_, line, _ = children
comment_or_rule_or_empty = line[0]
if comment_or_rule_or_empty:
return comment_or_rule_or_empty
return None
def visit_rule(
self, node: Node, children: tuple[object, list[EnhancementMatch], list[EnhancementAction]]
) -> EnhancementRule:
_, matcher, actions = children
return EnhancementRule(matcher, actions)
def visit_matchers(
self, node: Node, children: tuple[list[CallerMatch], list[FrameMatch], list[CalleeMatch]]
) -> list[EnhancementMatch]:
caller_matcher, frame_matchers, callee_matcher = children
return [*caller_matcher, *frame_matchers, *callee_matcher]
def visit_caller_matcher(
self,
node: Node,
children: tuple[object, object, object, FrameMatch, object, object, object, object],
) -> CallerMatch:
_, _, _, inner, _, _, _, _ = children
return CallerMatch(inner)
def visit_callee_matcher(
self,
node: Node,
children: tuple[object, object, object, object, object, FrameMatch, object, object],
) -> CalleeMatch:
_, _, _, _, _, inner, _, _ = children
return CalleeMatch(inner)
def visit_frame_matcher(
self, node: Node, children: tuple[object, bool, str, object, str]
) -> EnhancementMatch:
_, negation, ty, _, argument = children
return FrameMatch.from_key(ty, argument, bool(negation))
def visit_matcher_type(self, node: Node, children: object) -> str:
return node.text
def visit_argument(self, node: Node, children: list[str]) -> str:
return children[0]
def visit_action(
self, node: Node, children: list[FlagAction | VarAction]
) -> FlagAction | VarAction:
return children[0]
def visit_flag_action(
self, node: Node, children: tuple[object, list[str] | None, bool, str]
) -> FlagAction:
_, rng, flag, action_name = children
return FlagAction(action_name, flag, rng[0] if rng else None)
def visit_flag_action_name(self, node: Node, children: object) -> str:
return node.text
def visit_var_action(
self, node: Node, children: tuple[object, str, object, object, object, str]
) -> VarAction:
_, var_name, _, _, _, arg = children
return VarAction(var_name, arg)
def visit_var_name(self, node: Node, children: object) -> str:
return node.text
def visit_flag(self, node: Node, children: object) -> bool:
return node.text == "+"
def visit_range(self, node: Node, children: object) -> str:
if node.text == "^":
return "up"
return "down"
def visit_quoted(self, node: Node, children: object) -> str:
return unescape_string(node.text[1:-1])
def visit_unquoted(self, node: Node, children: object) -> str:
return node.text
def generic_visit[T](self, node: Node, children: T) -> T:
return children
def visit_ident(self, node: Node, children: object) -> str:
return node.text
def visit_quoted_ident(self, node: RegexNode, children: object) -> str:
# leading ! are used to indicate negation. make sure they don't appear.
return node.match.groups()[0].lstrip("!")
def parse_enhancements(s: str) -> list[EnhancementRule]:
try:
tree = enhancements_grammar.parse(s)
return EnhancementsVisitor().visit(tree)
except ParseError as e:
context = e.text[e.pos : e.pos + 33]
if len(context) == 33:
context = context[:-1] + "..."
raise InvalidEnhancerConfig(
f'Invalid syntax near "{context}" (line {e.line()}, column {e.column()})'
)
| EnhancementsVisitor |
python | tensorflow__tensorflow | tensorflow/python/framework/ops.py | {
"start": 228716,
"end": 232549
} | class ____(enum.Enum):
OFF: int = 0
LEGACY: int = 1
SAFE: int = 2
ALL: int = 3
_dtype_conversion_mode: PromoMode = PromoMode.OFF
def get_dtype_conversion_mode() -> PromoMode:
return _dtype_conversion_mode
# TODO(b/289395872): Make sure all WeakTensor construction is guarded with this
# check.
def is_auto_dtype_conversion_enabled() -> bool:
return (
_dtype_conversion_mode == PromoMode.ALL
or _dtype_conversion_mode == PromoMode.SAFE
)
def is_numpy_style_type_promotion() -> bool:
return _dtype_conversion_mode == PromoMode.LEGACY
def set_dtype_conversion_mode(dtype_conversion_mode) -> None:
"""Enables the specified dtype conversion mode.
Args:
dtype_conversion_mode: a string that specifies dtype conversion mode. This
string corresponds to a PromoMode Enum and can be 'off', 'legacy', 'safe'
or 'all'.
"""
global _dtype_conversion_mode
_dtype_conversion_mode = _get_promo_mode_enum(dtype_conversion_mode)
def _get_promo_mode_enum(dtype_conversion_mode) -> PromoMode:
"""Returns the corresponding PromoMode enum value from string."""
if dtype_conversion_mode == "off":
return PromoMode.OFF
if dtype_conversion_mode == "legacy":
return PromoMode.LEGACY
elif dtype_conversion_mode == "safe":
return PromoMode.SAFE
elif dtype_conversion_mode == "all":
return PromoMode.ALL
else:
raise ValueError(
f"The provided promotion mode {dtype_conversion_mode} does not exist."
" Make sure the provided dtype conversion mode is one of the"
" followings: 'off', 'legacy', 'safe' or 'all'."
)
def promo_mode_enum_to_string(promo_safety_mode_enum) -> str:
"""Returns the corresponding PromoMode string value from PromoMode enum."""
if promo_safety_mode_enum == PromoMode.OFF:
return "off"
if promo_safety_mode_enum == PromoMode.LEGACY:
return "legacy"
elif promo_safety_mode_enum == PromoMode.SAFE:
return "safe"
elif promo_safety_mode_enum == PromoMode.ALL:
return "all"
else:
raise ValueError(
f"The provided promotion mode {promo_safety_mode_enum} does not exist."
)
_numpy_style_slicing: bool = False
def enable_numpy_style_slicing() -> None:
"""If called, follows NumPy's rules for slicing Tensors.
Used for enabling NumPy behavior on slicing for TF NumPy.
"""
global _numpy_style_slicing
_numpy_style_slicing = True
def set_int_list_attr(op, attr_name, ints) -> None:
"""TF internal method used to set a list(int) attribute in the node_def."""
ints_list = attr_value_pb2.AttrValue.ListValue(i=ints)
op._set_attr(attr_name, attr_value_pb2.AttrValue(list=ints_list)) # pylint:disable=protected-access
def _get_enclosing_context(graph) -> Any:
# pylint: disable=protected-access
if graph is None:
return None
if graph._control_flow_context is not None:
return graph._control_flow_context
if graph.building_function and hasattr(graph, "outer_graph"):
return _get_enclosing_context(graph.outer_graph)
# TODO(b/271463878): Remove in favor of direct references to `handle_data_util`.
get_resource_handle_data = handle_data_util.get_resource_handle_data
def _copy_handle_data_to_arg_def(tensor, arg_def) -> None:
handle_data = handle_data_util.get_resource_handle_data(tensor)
if handle_data.shape_and_type:
shape_and_type = handle_data.shape_and_type[0]
proto = arg_def.handle_data.add()
proto.dtype = shape_and_type.dtype
proto.shape.CopyFrom(handle_data.shape_and_type[0].shape)
@tf_export("is_symbolic_tensor", v1=["is_symbolic_tensor"])
def is_symbolic_tensor(tensor) -> bool:
"""Test if `tensor` is a symbolic Tensor.
Args:
tensor: a tensor-like object
Returns:
True if `tensor` is a symbolic tensor (not an eager tensor).
"""
return isinstance(tensor, SymbolicTensor)
| PromoMode |
python | ansible__ansible | hacking/create-bulk-issues.py | {
"start": 3257,
"end": 3936
} | class ____:
title: str
summary: str
component: str
labels: list[str] | None = None
def create_issue(self, project: str) -> Issue:
body = f'''
### Summary
{self.summary}
### Issue Type
Bug Report
### Component Name
`{self.component}`
### Ansible Version
{MAJOR_MINOR_VERSION}
### Configuration
N/A
### OS / Environment
N/A
### Steps to Reproduce
N/A
### Expected Results
N/A
### Actual Results
N/A
'''
return Issue(
title=self.title,
summary=self.summary,
body=body.strip(),
project=project,
labels=self.labels,
)
@dataclasses.dataclass(frozen=True)
| BugReport |
python | huggingface__transformers | src/transformers/models/glm4_moe/modeling_glm4_moe.py | {
"start": 22403,
"end": 25633
} | class ____(Glm4MoePreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"model\.layers\.92.*", r"model\.layers\.46.*"]
def __init__(self, config: Glm4MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Glm4MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Glm4MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Glm4MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| Glm4MoeModel |
python | Lightning-AI__lightning | src/lightning/pytorch/strategies/launchers/xla.py | {
"start": 1257,
"end": 7510
} | class ____(_MultiProcessingLauncher):
r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at the
end.
The main process in which this launcher is invoked creates N so-called worker processes (using the
`torch_xla` :func:`xmp.spawn`) that run the given function.
Worker processes have a rank that ranges from 0 to N - 1.
Note:
- This launcher requires all objects to be pickleable.
- It is important that the entry point to the program/script is guarded by ``if __name__ == "__main__"``.
Args:
strategy: A reference to the strategy that is used together with this launcher
"""
def __init__(self, strategy: "pl.strategies.XLAStrategy") -> None:
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
super().__init__(strategy=strategy, start_method="fork")
@property
@override
def is_interactive_compatible(self) -> bool:
return True
@override
def launch(self, function: Callable, *args: Any, trainer: Optional["pl.Trainer"] = None, **kwargs: Any) -> Any:
"""Launches processes that run the given function in parallel.
The function is allowed to have a return value. However, when all processes join, only the return value
of worker process 0 gets returned from this `launch` method in the main process.
Arguments:
function: The entry point for all launched processes.
*args: Optional positional arguments to be passed to the given function.
trainer: Optional reference to the :class:`~lightning.pytorch.trainer.trainer.Trainer` for which
a selected set of attributes get restored in the main process after processes join.
**kwargs: Optional keyword arguments to be passed to the given function.
"""
if self._already_fit and trainer is not None and trainer.state.fn == TrainerFn.FITTING:
# resolving https://github.com/Lightning-AI/pytorch-lightning/issues/18775 will lift this restriction
raise NotImplementedError(
"Calling `trainer.fit()` twice on the same Trainer instance using a spawn-based strategy is not"
" supported. You can work around this by creating a new Trainer instance and passing the"
" `fit(ckpt_path=...)` argument."
)
# pjrt requires that the queue is serializable
return_queue = mp.Manager().Queue()
import torch_xla.distributed.xla_multiprocessing as xmp
spawn_kwargs = {}
nprocs = self._strategy.num_processes
if nprocs == 1:
# avoid warning: "Unsupported nprocs". If it's 1, it will call the launched function directly.
# otherwise it will use all devices
spawn_kwargs["nprocs"] = nprocs
process_context = xmp.spawn(
self._wrapping_function,
args=(trainer, function, args, kwargs, return_queue),
start_method=self._start_method,
join=False, # we will join ourselves to get the process references
**spawn_kwargs,
)
# xla will not actually create processes if only 1 device
if process_context is not None:
self.procs = process_context.processes
while not process_context.join():
pass
worker_output = return_queue.get()
if trainer is None:
return worker_output
self._already_fit |= trainer.state.fn == TrainerFn.FITTING
self._recover_results_in_main_process(worker_output, trainer)
return worker_output.trainer_results
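    # Illustrative call path (a sketch, not part of the original file): this
    # launcher is not used directly; a Trainer configured for TPUs creates it
    # through the XLAStrategy and invokes `launch` with the trainer's entry-point
    # function. Per the class docstring, the launching script must guard its
    # entry point, e.g.:
    #
    #     if __name__ == "__main__":
    #         trainer = pl.Trainer(accelerator="tpu", devices=8)
    #         trainer.fit(model)  # `model` is a user-defined LightningModule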
@override
def _wrapping_function(
self,
# XLA's multiprocessing returns the global index, not the local index as torch's multiprocessing
# https://github.com/pytorch/xla/blob/v1.13.0/torch_xla/distributed/xla_multiprocessing.py#L321
process_idx: int,
trainer: Optional["pl.Trainer"],
function: Callable,
args: Any,
kwargs: Any,
return_queue: Union[mp.SimpleQueue, queue.Queue],
global_states: Optional[_GlobalStateSnapshot] = None,
) -> None:
import torch_xla.core.xla_model as xm
if len(xm.get_xla_supported_devices()) > 1:
# `get_xla_supported_devices` in the spawned process returns the logical devices (2 for v2/v3 and 1 for v4)
# so when there's more than one (multithreading), objects need to be deep-copied
import copy
trainer, function, args, kwargs = copy.deepcopy((trainer, function, args, kwargs))
results = function(*args, **kwargs)
if trainer is not None:
results = self._collect_rank_zero_results(trainer, results)
if self._strategy.local_rank == 0:
return_queue.put(move_data_to_device(results, "cpu"))
_rank_teardown(self._strategy.local_rank)
@override
def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Optional["_WorkerOutput"]:
rank_zero_debug("Collecting results from rank 0 process.")
checkpoint_callback = trainer.checkpoint_callback
best_model_path = (
checkpoint_callback.best_model_path
if checkpoint_callback and hasattr(checkpoint_callback, "best_model_path")
else None
)
# save the last weights
weights_path = None
if trainer.state.fn == TrainerFn.FITTING:
# requires to compute the state_dict on all processes in case Metrics are present
state_dict = self._strategy.lightning_module_state_dict()
weights_path = os.path.join(trainer.default_root_dir, ".temp.ckpt")
self._strategy.checkpoint_io.save_checkpoint(state_dict, weights_path)
# We use `local_rank` here as separate filesystems are used for each VM for TPU Pod Training
if self._strategy.local_rank != 0:
return None
# add extra result data from trainer to send to main process
extra = self.get_extra_results(trainer)
return _WorkerOutput(best_model_path, weights_path, trainer.state, results, extra)
| _XLALauncher |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/manager/base.py | {
"start": 1003,
"end": 1660
} | class ____(logging.Handler):
def __init__(self, client, *args, **kwargs):
self._client = client
super().__init__(*args, **kwargs)
log_format = "%(message)s — %(asctime)s"
formatter = logging.Formatter(log_format, datefmt="%H:%M:%S %d/%m/%Y")
self.setFormatter(formatter)
def emit(self, record):
self._client._plugin.sig_client_message_logged.emit(
RemoteClientLog(
id=self._client.config_id,
message=self.format(record),
level=record.levelno,
created=record.created,
)
)
| SpyderRemoteAPILoggerHandler |
python | readthedocs__readthedocs.org | readthedocs/core/unresolver.py | {
"start": 1362,
"end": 1571
} | class ____(UnresolverError):
def __init__(self, project, version_slug, filename):
self.project = project
self.version_slug = version_slug
self.filename = filename
| VersionNotFoundError |
python | walkccc__LeetCode | solutions/7. Reverse Integer/7.py | {
"start": 0,
"end": 232
} | class ____:
def reverse(self, x: int) -> int:
ans = 0
sign = -1 if x < 0 else 1
x *= sign
while x:
ans = ans * 10 + x % 10
x //= 10
return 0 if ans < -2**31 or ans > 2**31 - 1 else sign * ans
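  # Worked example (illustrative, not part of the original solution): x = -123
  #   sign = -1, x = 123
  #   loop: ans = 3 -> 32 -> 321 while x goes 12 -> 1 -> 0
  #   321 is within [-2**31, 2**31 - 1], so the result is sign * ans = -321.
  # An overflowing input such as 1534236469 reverses to 9646324351 and returns 0.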
| Solution |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/extractors/base.py | {
"start": 1707,
"end": 2055
} | class ____(Generic[DatasetSubclass, BaseFacetSubclass]):
"""Structure returned from lineage extraction."""
inputs: list[DatasetSubclass] = Factory(list)
outputs: list[DatasetSubclass] = Factory(list)
run_facets: dict[str, BaseFacetSubclass] = Factory(dict)
job_facets: dict[str, BaseFacetSubclass] = Factory(dict)
| OperatorLineage |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_release_deploys.py | {
"start": 4902,
"end": 16915
} | class ____(APITestCase):
def setUp(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
self.org = self.create_organization()
self.org.save()
team = self.create_team(organization=self.org)
self.project = self.create_project(name="foo", organization=self.org, teams=[team])
self.create_member(teams=[team], user=user, organization=self.org)
self.login_as(user=user)
def test_simple(self) -> None:
release = Release.objects.create(organization_id=self.org.id, version="1", total_deploys=0)
release.add_project(self.project)
environment = Environment.objects.create(organization_id=self.org.id, name="production")
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
response = self.client.post(
url, data={"name": "foo", "environment": "production", "url": "https://www.example.com"}
)
assert response.status_code == 201, response.content
assert response.data["name"] == "foo"
assert response.data["url"] == "https://www.example.com"
assert response.data["environment"] == "production"
deploy = Deploy.objects.get(id=response.data["id"])
assert deploy.name == "foo"
assert deploy.environment_id == environment.id
assert deploy.url == "https://www.example.com"
assert deploy.release == release
release = Release.objects.get(id=release.id)
assert release.total_deploys == 1
assert release.last_deploy_id == deploy.id
rpe = ReleaseProjectEnvironment.objects.get(
project=self.project, release=release, environment=environment
)
assert rpe.last_deploy_id == deploy.id
def test_with_project_slugs(self) -> None:
project_bar = self.create_project(organization=self.org, name="bar")
release = Release.objects.create(organization_id=self.org.id, version="1", total_deploys=0)
release.add_project(self.project)
release.add_project(project_bar)
environment = Environment.objects.create(organization_id=self.org.id, name="production")
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
response = self.client.post(
url,
data={
"name": "foo_bar",
"environment": "production",
"url": "https://www.example.com",
"projects": [self.project.slug, project_bar.slug],
},
)
assert response.status_code == 201, response.content
assert response.data["name"] == "foo_bar"
assert response.data["url"] == "https://www.example.com"
assert response.data["environment"] == "production"
deploy = Deploy.objects.get(id=response.data["id"])
assert deploy.name == "foo_bar"
assert deploy.environment_id == environment.id
assert deploy.url == "https://www.example.com"
assert deploy.release == release
release = Release.objects.get(id=release.id)
assert release.total_deploys == 1
assert release.last_deploy_id == deploy.id
rpe = ReleaseProjectEnvironment.objects.get(
project=self.project, release=release, environment=environment
)
assert rpe.last_deploy_id == deploy.id
rpe = ReleaseProjectEnvironment.objects.get(
project=project_bar, release=release, environment=environment
)
assert rpe.last_deploy_id == deploy.id
def test_with_multiple_projects(self) -> None:
"""
        Test that when a release is associated with multiple projects, the user is still able to create
        a deploy for only one project.
"""
project_bar = self.create_project(organization=self.org, name="bar")
release = Release.objects.create(organization_id=self.org.id, version="1", total_deploys=0)
release.add_project(self.project)
release.add_project(project_bar)
environment = Environment.objects.create(organization_id=self.org.id, name="production")
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
response = self.client.post(
url,
data={
"name": "foo_bar",
"environment": "production",
"url": "https://www.example.com",
"projects": [project_bar.slug],
},
)
assert response.status_code == 201, response.content
assert response.data["name"] == "foo_bar"
assert response.data["url"] == "https://www.example.com"
assert response.data["environment"] == "production"
deploy = Deploy.objects.get(id=response.data["id"])
assert deploy.name == "foo_bar"
assert deploy.environment_id == environment.id
assert deploy.url == "https://www.example.com"
assert deploy.release == release
release = Release.objects.get(id=release.id)
assert release.total_deploys == 1
assert release.last_deploy_id == deploy.id
assert not ReleaseProjectEnvironment.objects.filter(
project=self.project, release=release, environment=environment
).exists()
rpe = ReleaseProjectEnvironment.objects.get(
project=project_bar, release=release, environment=environment
)
assert rpe.last_deploy_id == deploy.id
def test_with_project_ids(self) -> None:
project_bar = self.create_project(organization=self.org, name="bar")
release = Release.objects.create(organization_id=self.org.id, version="1", total_deploys=0)
release.add_project(self.project)
release.add_project(project_bar)
environment = Environment.objects.create(organization_id=self.org.id, name="production")
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
response = self.client.post(
url,
data={
"name": "foo_bar",
"environment": "production",
"url": "https://www.example.com",
"projects": [self.project.id, project_bar.id],
},
)
assert response.status_code == 201, response.content
assert response.data["name"] == "foo_bar"
assert response.data["url"] == "https://www.example.com"
assert response.data["environment"] == "production"
deploy = Deploy.objects.get(id=response.data["id"])
assert deploy.name == "foo_bar"
assert deploy.environment_id == environment.id
assert deploy.url == "https://www.example.com"
assert deploy.release == release
release = Release.objects.get(id=release.id)
assert release.total_deploys == 1
assert release.last_deploy_id == deploy.id
rpe = ReleaseProjectEnvironment.objects.get(
project=self.project, release=release, environment=environment
)
assert rpe.last_deploy_id == deploy.id
rpe = ReleaseProjectEnvironment.objects.get(
project=project_bar, release=release, environment=environment
)
assert rpe.last_deploy_id == deploy.id
def test_with_invalid_project_slug(self) -> None:
bar_project = self.create_project(organization=self.org, name="bar")
release = Release.objects.create(organization_id=self.org.id, version="1", total_deploys=0)
release.add_project(self.project)
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
response = self.client.post(
url,
data={
"name": "foo",
"environment": "production",
"url": "https://www.example.com",
"projects": [bar_project.slug],
},
)
assert response.status_code == 400, response.content
assert response.data["detail"]["code"] == "parameter-validation-error"
assert "Invalid projects" in response.data["detail"]["message"]
assert 0 == Deploy.objects.count()
def test_environment_validation_failure(self) -> None:
release = Release.objects.create(
organization_id=self.org.id, version="123", total_deploys=0
)
release.add_project(self.project)
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
response = self.client.post(
url, data={"name": "foo", "environment": "bad/name", "url": "https://www.example.com"}
)
assert response.status_code == 400, response.content
assert 0 == Deploy.objects.count()
def test_api_token_with_project_releases_scope(self) -> None:
"""
Test that tokens with `project:releases` scope can create deploys for only one project
when the release is associated with multiple projects.
"""
# Create a second project
project_bar = self.create_project(organization=self.org, name="bar")
# Create a release for both projects
release = Release.objects.create(organization_id=self.org.id, version="1", total_deploys=0)
release.add_project(self.project)
release.add_project(project_bar)
# Create API token with project:releases scope
user = self.create_user(is_staff=False, is_superuser=False)
# Add user to the organization - they need to be a member to use the API
self.create_member(user=user, organization=self.org)
with assume_test_silo_mode(SiloMode.CONTROL):
api_token = ApiToken.objects.create(user=user, scope_list=["project:releases"])
url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": self.org.slug,
"version": release.version,
},
)
# Create deploy for only one project (project_bar)
response = self.client.post(
url,
data={
"name": "single_project_deploy",
"environment": "production",
"url": "https://www.example.com",
"projects": [project_bar.slug], # Only one project specified
},
HTTP_AUTHORIZATION=f"Bearer {api_token.token}",
)
assert response.status_code == 201, response.content
assert response.data["name"] == "single_project_deploy"
assert response.data["environment"] == "production"
environment = Environment.objects.get(name="production", organization_id=self.org.id)
# Verify ReleaseProjectEnvironment was created only for project_bar
assert ReleaseProjectEnvironment.objects.filter(
project=project_bar, release=release, environment=environment
).exists()
# Verify ReleaseProjectEnvironment was NOT created for self.project
assert not ReleaseProjectEnvironment.objects.filter(
project=self.project, release=release, environment=environment
).exists()
| ReleaseDeploysCreateTest |
python | huggingface__transformers | src/transformers/models/llava_onevision/modular_llava_onevision.py | {
"start": 1922,
"end": 9127
} | class ____(LlavaNextImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
crop_size = None
default_to_square = False
do_resize = True
do_center_crop = None
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_pad = True
image_grid_pinpoints = [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]] # fmt: skip
model_input_names = ["pixel_values", "image_sizes", "batch_num_images"]
# Copied from transformers.models.llava.image_processing_llava_fast.LlavaImageProcessorFast.pad_to_square
def pad_to_square(
self,
images: "torch.Tensor",
background_color: Union[int, tuple[int, int, int]] = 0,
) -> "torch.Tensor":
"""
Pads an image to a square based on the longest edge.
Args:
            images (`torch.Tensor`):
                The images to pad.
            background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
                The color to use for the padding. Can be an integer for single-channel images or a
                tuple of integers for multi-channel images. If passed as an integer
                in multi-channel mode, it defaults to `0` in the remaining channels.
Returns:
`torch.Tensor`: The padded images.
"""
height, width = get_image_size(images, ChannelDimension.FIRST)
if height == width:
return images
num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]
if isinstance(background_color, int):
background_color = [background_color] + [0] * (num_channels - 1)
elif len(background_color) != num_channels:
raise ValueError(
f"background_color must have no more than {num_channels} elements to match the number of channels"
)
max_dim = max(height, width)
paste_x_left = (max_dim - width) // 2
paste_y_left = (max_dim - height) // 2
paste_x_right = max_dim - width - paste_x_left
paste_y_right = max_dim - height - paste_y_left
padded_images = F.pad(
images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color
)
return padded_images
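    # Shape sketch for pad_to_square (illustrative, not from the original file):
    # a CHW image of shape (3, 300, 400) (height 300, width 400) becomes
    # (3, 400, 400); (400 - 300) // 2 = 50 rows are padded above and below and
    # filled with `background_color`, while the width is left untouched.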
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionImageProcessorKwargs]) -> BatchFeature:
if isinstance(images, (tuple, list)) and isinstance(images[0], (tuple, list)):
# if the first element is a list, we assume that all elements are lists
batch_num_images = [len(x) for x in images]
elif isinstance(images, (tuple, list)):
# treat this as a single-image case for backward compatibility
batch_num_images = [1] * len(images)
else:
batch_num_images = [1]
        return BaseImageProcessorFast.preprocess(self, images, batch_num_images, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
batch_num_images: list[int],
do_resize: bool,
size: SizeDict,
image_grid_pinpoints: list[list[int]],
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_pad: bool,
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
processed_images = []
image_sizes = []
# only single image patching is supported
need_patching = [n == 1 for n in batch_num_images for _ in range(n)]
# Determine the size tuple
if size and size.height and size.width:
size_tuple = (size.height, size.width)
else:
size_tuple = (size.shortest_edge, size.shortest_edge)
# Determine the patch size
if crop_size and crop_size.height:
patch_size = crop_size.height
elif size and size.height:
patch_size = size.height
else:
patch_size = size.shortest_edge
for i, image in enumerate(images):
if need_patching[i]:
image_patches = self._get_image_patches(
image,
image_grid_pinpoints,
size=size_tuple,
patch_size=patch_size,
interpolation=interpolation,
)
else:
padded_image = self.pad_to_square(
images=image, background_color=tuple(int(x * 255) for x in self.image_mean)
)
image_patches = [padded_image]
# Group images by size for batched processing
processed_image_patches_grouped = {}
grouped_image_patches, grouped_image_patches_index = group_images_by_shape(
image_patches, disable_grouping=disable_grouping
)
for shape, stacked_image_patches in grouped_image_patches.items():
if do_resize:
stacked_image_patches = self.resize(
image=stacked_image_patches,
size=size,
interpolation=interpolation,
)
if do_center_crop:
stacked_image_patches = self.center_crop(stacked_image_patches, crop_size)
# Fused rescale and normalize
stacked_image_patches = self.rescale_and_normalize(
stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_image_patches_grouped[shape] = stacked_image_patches
processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index)
processed_image_patches = torch.stack(processed_image_patches, dim=0)
processed_images.append(processed_image_patches)
image_sizes.append(get_image_size(image, ChannelDimension.FIRST))
if do_pad:
processed_images = self._pad_for_batching(processed_images)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(
data={"pixel_values": processed_images, "image_sizes": image_sizes, "batch_num_images": batch_num_images},
tensor_type=return_tensors,
)
| LlavaOnevisionImageProcessorFast |
python | huggingface__transformers | src/transformers/models/longformer/modeling_longformer.py | {
"start": 63096,
"end": 63790
} | class ____(nn.Module):
"""Longformer Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
@auto_docstring
| LongformerLMHead |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v2_utils.py | {
"start": 61254,
"end": 67942
} | class ____:
"""Configuration data for one embedding feature.
This class holds the configuration data for a single embedding feature. The
main use is to assign features to `tf.tpu.experimental.embedding.TableConfig`s
via the table parameter:
```python
table_config_one = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...)
table_config_two = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...)
feature_config = {
'feature_one': tf.tpu.experimental.embedding.FeatureConfig(
table=table_config_one),
'feature_two': tf.tpu.experimental.embedding.FeatureConfig(
table=table_config_one),
'feature_three': tf.tpu.experimental.embedding.FeatureConfig(
table=table_config_two)}
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
feature_config=feature_config,
batch_size=...
optimizer=tf.tpu.experimental.embedding.Adam(0.1))
```
  The above configuration has two tables and three features. The first two
features will be looked up in the first table and the third feature will be
looked up in the second table.
You can also specify the output shape for each feature. The output shape
should be the expected activation shape excluding the table dimension. For
dense and sparse tensor, the output shape should be the same as the input
shape excluding the last dimension. For ragged tensor, the output shape can
mismatch the input shape.
  NOTE: The `max_sequence_length` will only be used when the input tensor has
rank 2 and the `output_shape` is not set in the feature config.
When feeding features into `embedding.enqueue` they can be `tf.Tensor`s,
`tf.SparseTensor`s or `tf.RaggedTensor`s. When the argument
`max_sequence_length` is 0, the default, you should expect a output of
`embedding.dequeue` for this feature of shape `(batch_size, dim)`. If
`max_sequence_length` is greater than 0, the feature is embedded as a sequence
and padded up to the given length. The shape of the output for this feature
will be `(batch_size, max_sequence_length, dim)`.
"""
def __init__(self,
table: TableConfig,
max_sequence_length: int = 0,
validate_weights_and_indices: bool = True,
output_shape: Optional[Union[List[int], TensorShape]] = None,
name: Optional[Text] = None):
"""Feature configuration.
Args:
table: An instance of `tf.tpu.experimental.embedding.TableConfig`,
describing the table in which this feature should be looked up.
max_sequence_length: If positive, the feature is a sequence feature with
the corresponding maximum sequence length. If the sequence is longer
than this, it will be truncated. If 0, the feature is not a sequence
feature.
validate_weights_and_indices: If true, uses safe_embedding_lookup during
serving which ensures there are no empty rows and all weights and ids
are positive at the expense of extra compute cost.
      output_shape: Optional argument to configure the output shape of the feature
        activation. If provided, the feature fed to `embedding.enqueue` has to
        match the shape (for ragged tensors, the input shape and output shape
        can mismatch). If not provided, the shape can either be provided to
        `embedding.build` or auto-detected at runtime.
      name: An optional string used to name the feature. Must be defined if
running on SparseCore.
Returns:
`FeatureConfig`.
Raises:
ValueError: if `table` is not an instance of
`tf.tpu.experimental.embedding.TableConfig`.
ValueError: if `max_sequence_length` not an integer or is negative.
"""
if not isinstance(table, TableConfig):
raise ValueError(f"Argument `table` has invalid type {type(table)}. "
"Expected `tf.tpu.experimental.embedding.TableConfig`.")
if not isinstance(max_sequence_length, int) or max_sequence_length < 0:
raise ValueError(
f"Argument `max_sequence_length` must be an int and must be >= 0. "
f"Received: {max_sequence_length}")
self.table = table
self.max_sequence_length = max_sequence_length
self.name = name
self.output_shape = TensorShape(output_shape)
if not isinstance(
validate_weights_and_indices, bool):
raise ValueError(
f"Argument `validate_weights_and_indices` must be a boolean. "
f"Received: {validate_weights_and_indices}")
self.validate_weights_and_indices = validate_weights_and_indices
def __repr__(self):
return ("FeatureConfig(table={table!r}, "
"max_sequence_length={max_sequence_length!r}, "
"validate_weights_and_indices={validate_weights_and_indices!r}, "
"output_shape={output_shape!r}, name={name!r})".format(
table=self.table,
max_sequence_length=self.max_sequence_length,
validate_weights_and_indices=self.validate_weights_and_indices,
output_shape=self.output_shape,
name=self.name))
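# Illustrative sketch (not part of the original module): a sequence feature
# configured as described in the class docstring above. With
# max_sequence_length=8 and a 16-dim table, the dequeued activation for this
# feature has shape (batch_size, 8, 16).
def _sequence_feature_config_example() -> FeatureConfig:
  video_table = TableConfig(vocabulary_size=1024, dim=16, name="video")
  return FeatureConfig(
      table=video_table, max_sequence_length=8, name="watched_videos")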
def log_tpu_embedding_configuration(
config: tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration) -> None:
"""Logs a TPUEmbeddingConfiguration proto across multiple statements.
Args:
config: TPUEmbeddingConfiguration proto to log. Necessary because
logging.info has a maximum length to each log statement, which
particularly large configs can exceed.
"""
logging.info("Beginning log of TPUEmbeddingConfiguration.")
for line in str(config).splitlines():
logging.info(line)
logging.info("Done with log of TPUEmbeddingConfiguration.")
def _sort_device_spec_strings(device_strings: Iterable[str]) -> List[str]:
sorted_specs = sorted(
(device_spec.DeviceSpecV2.from_string(spec) for spec in device_strings),
key=lambda s: (s.replica, s.task, s.device_index),
)
return [spec.to_string() for spec in sorted_specs]
def get_list_of_hosts(strategy: tpu_strategy.TPUStrategy) -> List[Text]:
"""Returns a sorted list of CPU devices for the remote jobs.
Args:
strategy: A TPUStrategy object.
Returns:
A sorted list of device host strings.
"""
list_of_hosts = []
  # Elsewhere we assume that the list of hosts is sorted.
for tpu_device in _sort_device_spec_strings(strategy.extended.worker_devices):
host = device_util.get_host_for_device(tpu_device)
if host not in list_of_hosts:
list_of_hosts.append(host)
assert len(list_of_hosts) == strategy.extended.num_hosts
return list_of_hosts
| FeatureConfig |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 230169,
"end": 232040
} | class ____(_fixtures.FixtureTest):
"""POC test for both #7153 and #7154"""
run_inserts = "once"
run_deletes = None
__sparse_driver_backend__ = True
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_limited_eager_w_null(self):
User = self.classes.User
Address = self.classes.Address
stmt = (
select(User, null())
.options(joinedload(User.addresses))
.where(User.id == 8)
.limit(10)
)
session = fixture_session()
def go():
eq_(
session.execute(stmt).unique().all(),
[
(
User(
id=8, addresses=[Address(), Address(), Address()]
),
None,
)
],
)
self.assert_sql_count(testing.db, go, 1)
def test_limited_eager_w_multi_null_booleans(self):
User = self.classes.User
Address = self.classes.Address
stmt = (
select(User, null(), null(), null(), true(), true())
.options(joinedload(User.addresses))
.where(User.id == 8)
.limit(10)
)
session = fixture_session()
def go():
eq_(
session.execute(stmt).unique().all(),
[
(
User(
id=8, addresses=[Address(), Address(), Address()]
),
None,
None,
None,
True,
True,
)
],
)
self.assert_sql_count(testing.db, go, 1)
| SingletonConstantSubqTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1180576,
"end": 1180789
} | class ____(ColorScheme):
"""SequentialSingleHue schema wrapper."""
_schema = {"$ref": "#/definitions/SequentialSingleHue"}
def __init__(self, *args):
super().__init__(*args)
| SequentialSingleHue |
python | sqlalchemy__sqlalchemy | test/sql/test_compiler.py | {
"start": 7003,
"end": 132891
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_compiler_column_element_is_slots(self):
class SomeColThing(CompilerColumnElement):
__slots__ = ("name",)
__visit_name__ = "some_col_thing"
def __init__(self, name):
self.name = name
c1 = SomeColThing("some name")
eq_(c1.name, "some name")
assert not hasattr(c1, "__dict__")
def test_compile_label_is_slots(self):
c1 = compiler._CompileLabel(column("q"), "somename")
eq_(c1.name, "somename")
assert not hasattr(c1, "__dict__")
def test_attribute_sanity(self):
assert hasattr(table1, "c")
assert hasattr(table1.select().subquery(), "c")
assert not hasattr(table1.c.myid.self_group(), "columns")
assert not hasattr(table1.c.myid, "columns")
assert not hasattr(table1.c.myid, "c")
assert not hasattr(table1.select().subquery().c.myid, "c")
assert not hasattr(table1.select().subquery().c.myid, "columns")
assert not hasattr(table1.alias().c.myid, "columns")
assert not hasattr(table1.alias().c.myid, "c")
assert_raises_message(
exc.InvalidRequestError,
"Scalar Select expression has no "
"columns; use this object directly within a "
"column-level expression.",
getattr,
select(table1.c.myid).scalar_subquery().self_group(),
"columns",
)
assert_raises_message(
exc.InvalidRequestError,
"Scalar Select expression has no "
"columns; use this object directly within a "
"column-level expression.",
getattr,
select(table1.c.myid).scalar_subquery(),
"columns",
)
def test_table_select(self):
self.assert_compile(
table1.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable",
)
self.assert_compile(
select(table1, table2),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable",
)
def test_int_limit_offset_coercion(self):
for given, exp in [
("5", 5),
(5, 5),
(5.2, 5),
(decimal.Decimal("5"), 5),
(None, None),
]:
eq_(select().limit(given)._limit, exp)
eq_(select().offset(given)._offset, exp)
assert_raises(ValueError, select().limit, "foo")
assert_raises(ValueError, select().offset, "foo")
def test_limit_offset_no_int_coercion_one(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
self.assert_compile(
select(1).limit(exp1).offset(exp2), "SELECT 1 LIMIT Q OFFSET Y"
)
self.assert_compile(
select(1).limit(bindparam("x")).offset(bindparam("y")),
"SELECT 1 LIMIT :x OFFSET :y",
)
def test_limit_offset_no_int_coercion_two(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
sel = select(1).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr,
sel,
"_limit",
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr,
sel,
"_offset",
)
def test_limit_offset_no_int_coercion_three(self):
exp1 = bindparam("Q")
exp2 = bindparam("Y")
sel = select(1).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr,
sel,
"_limit",
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr,
sel,
"_offset",
)
@testing.combinations(
(
5,
10,
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 5, "param_2": 10},
),
(None, 10, "LIMIT -1 OFFSET :param_1", {"param_1": 10}),
(5, None, "LIMIT :param_1", {"param_1": 5}),
(
0,
0,
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 0, "param_2": 0},
),
(
literal_column("Q"),
literal_column("Y"),
"LIMIT Q OFFSET Y",
{},
),
(
column("Q"),
column("Y"),
'LIMIT "Q" OFFSET "Y"',
{},
),
)
def test_limit_offset(self, lim, offset, exp, params):
self.assert_compile(
select(1).limit(lim).offset(offset),
"SELECT 1 " + exp,
checkparams=params,
)
@testing.combinations(
(
5,
10,
{},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS ONLY",
{"param_1": 10, "param_2": 5},
),
(None, 10, {}, "LIMIT -1 OFFSET :param_1", {"param_1": 10}),
(
5,
None,
{},
"FETCH FIRST :param_1 ROWS ONLY",
{"param_1": 5},
),
(
0,
0,
{},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS ONLY",
{"param_1": 0, "param_2": 0},
),
(
5,
10,
{"percent": True},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 PERCENT ROWS ONLY",
{"param_1": 10, "param_2": 5},
),
(
5,
10,
{"percent": True, "with_ties": True},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 PERCENT ROWS WITH TIES",
{"param_1": 10, "param_2": 5},
),
(
5,
10,
{"with_ties": True},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS WITH TIES",
{"param_1": 10, "param_2": 5},
),
(
literal_column("Q"),
literal_column("Y"),
{},
"OFFSET Y ROWS FETCH FIRST Q ROWS ONLY",
{},
),
(
column("Q"),
column("Y"),
{},
'OFFSET "Y" ROWS FETCH FIRST "Q" ROWS ONLY',
{},
),
(
bindparam("Q", 3),
bindparam("Y", 7),
{},
"OFFSET :Y ROWS FETCH FIRST :Q ROWS ONLY",
{"Q": 3, "Y": 7},
),
(
literal_column("Q") + literal_column("Z"),
literal_column("Y") + literal_column("W"),
{},
"OFFSET Y + W ROWS FETCH FIRST Q + Z ROWS ONLY",
{},
),
)
def test_fetch(self, fetch, offset, fetch_kw, exp, params):
self.assert_compile(
select(1).fetch(fetch, **fetch_kw).offset(offset),
"SELECT 1 " + exp,
checkparams=params,
)
def test_fetch_limit_offset_self_group(self):
self.assert_compile(
select(1).limit(1).self_group(),
"(SELECT 1 LIMIT :param_1)",
checkparams={"param_1": 1},
)
self.assert_compile(
select(1).offset(1).self_group(),
"(SELECT 1 LIMIT -1 OFFSET :param_1)",
checkparams={"param_1": 1},
)
self.assert_compile(
select(1).fetch(1).self_group(),
"(SELECT 1 FETCH FIRST :param_1 ROWS ONLY)",
checkparams={"param_1": 1},
)
def test_limit_fetch_interaction(self):
self.assert_compile(
select(1).limit(42).fetch(1),
"SELECT 1 FETCH FIRST :param_1 ROWS ONLY",
checkparams={"param_1": 1},
)
self.assert_compile(
select(1).fetch(42).limit(1),
"SELECT 1 LIMIT :param_1",
checkparams={"param_1": 1},
)
self.assert_compile(
select(1).limit(42).offset(7).fetch(1),
"SELECT 1 OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS ONLY",
checkparams={"param_1": 7, "param_2": 1},
)
self.assert_compile(
select(1).fetch(1).slice(2, 5),
"SELECT 1 LIMIT :param_1 OFFSET :param_2",
checkparams={"param_1": 3, "param_2": 2},
)
self.assert_compile(
select(1).slice(2, 5).fetch(1),
"SELECT 1 OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS ONLY",
checkparams={"param_1": 2, "param_2": 1},
)
def test_select_precol_compile_ordering(self):
s1 = (
select(column("x"))
.select_from(text("a"))
.limit(5)
.scalar_subquery()
)
s2 = select(s1).limit(10)
class MyCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select, **kw):
result = ""
if select._limit:
result += "FIRST %s " % self.process(
literal(select._limit), **kw
)
if select._offset:
result += "SKIP %s " % self.process(
literal(select._offset), **kw
)
return result
def limit_clause(self, select, **kw):
return ""
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
dialect.paramstyle = "qmark"
dialect.positional = True
self.assert_compile(
s2,
"SELECT FIRST ? (SELECT FIRST ? x FROM a) AS anon_1",
checkpositional=(10, 5),
dialect=dialect,
)
@testing.combinations(
(
select(table1.c.name)
.select_from(table1, table2)
.where(table1.c.myid == table2.c.otherid),
"SELECT mytable.name FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table1.c.name)
.select_from(table2, table1)
.where(table1.c.myid == table2.c.otherid),
"SELECT mytable.name FROM myothertable, mytable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table2, table1),
"SELECT mytable.name FROM myothertable, mytable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table1, table2),
"SELECT mytable.name FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table3.c.userid, table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table1, table3, table2),
"SELECT thirdtable.userid, mytable.name "
"FROM mytable, thirdtable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table3.c.userid, table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table3, table1, table2),
"SELECT thirdtable.userid, mytable.name "
"FROM thirdtable, mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table3.c.userid, table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table1, table2),
"SELECT thirdtable.userid, mytable.name "
"FROM mytable, myothertable, thirdtable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table3.c.userid, table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table3, table2),
"SELECT thirdtable.userid, mytable.name "
"FROM thirdtable, myothertable, mytable "
"WHERE mytable.myid = myothertable.otherid",
),
(
select(table3.c.userid, table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table3, table2)
.join_from(table3, table1, table3.c.userid == table1.c.myid),
"SELECT thirdtable.userid, mytable.name "
"FROM thirdtable "
"JOIN mytable ON thirdtable.userid = mytable.myid, "
"myothertable WHERE mytable.myid = myothertable.otherid",
),
(
select(table3.c.userid, table1.c.name)
.where(table1.c.myid == table2.c.otherid)
.select_from(table2, table3)
.join_from(table3, table1, table3.c.userid == table1.c.myid),
"SELECT thirdtable.userid, mytable.name "
"FROM myothertable, thirdtable "
"JOIN mytable ON thirdtable.userid = mytable.myid "
"WHERE mytable.myid = myothertable.otherid",
),
)
def test_select_from_ordering(self, stmt, expected):
self.assert_compile(stmt, expected)
def test_from_subquery(self):
"""tests placing select statements in the column clause of
another select, for the
purposes of selecting from the exported columns of that select."""
s = select(table1).where(table1.c.name == "jack").subquery()
self.assert_compile(
select(s).where(s.c.myid == 7),
"SELECT anon_1.myid, anon_1.name, anon_1.description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description AS description "
"FROM mytable "
"WHERE mytable.name = :name_1) AS anon_1 WHERE "
"anon_1.myid = :myid_1",
)
sq = select(table1)
self.assert_compile(
sq.subquery().select(),
"SELECT anon_1.myid, anon_1.name, anon_1.description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable) AS anon_1",
)
sq = select(table1).alias("sq")
self.assert_compile(
sq.select().where(sq.c.myid == 7),
"SELECT sq.myid, sq.name, sq.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS sq "
"WHERE sq.myid = :myid_1",
)
sq = (
select(table1, table2)
.where(and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("sq")
)
sqstring = (
"SELECT mytable.myid AS mytable_myid, mytable.name AS "
"mytable_name, mytable.description AS mytable_description, "
"myothertable.otherid AS myothertable_otherid, "
"myothertable.othername AS myothertable_othername FROM "
"mytable, myothertable WHERE mytable.myid = :myid_1 AND "
"myothertable.otherid = mytable.myid"
)
self.assert_compile(
sq.select(),
"SELECT sq.mytable_myid, sq.mytable_name, "
"sq.mytable_description, sq.myothertable_otherid, "
"sq.myothertable_othername FROM (%s) AS sq" % sqstring,
)
sq2 = (
select(sq)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("sq2")
)
self.assert_compile(
sq2.select(),
"SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, "
"sq2.sq_mytable_description, sq2.sq_myothertable_otherid, "
"sq2.sq_myothertable_othername FROM "
"(SELECT sq.mytable_myid AS "
"sq_mytable_myid, sq.mytable_name AS sq_mytable_name, "
"sq.mytable_description AS sq_mytable_description, "
"sq.myothertable_otherid AS sq_myothertable_otherid, "
"sq.myothertable_othername AS sq_myothertable_othername "
"FROM (%s) AS sq) AS sq2" % sqstring,
)
def test_select_from_clauselist(self):
self.assert_compile(
select(ClauseList(column("a"), column("b"))).select_from(
text("sometable")
),
"SELECT a, b FROM sometable",
)
def test_use_labels(self):
self.assert_compile(
select(table1.c.myid == 5).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT mytable.myid = :myid_1 AS anon_1 FROM mytable",
)
self.assert_compile(
select(func.foo()).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT foo() AS foo_1",
)
# this is native_boolean=False for default dialect
self.assert_compile(
select(not_(True)).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT :param_1 = 0 AS anon_1",
)
self.assert_compile(
select(cast("data", Integer)).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT CAST(:param_1 AS INTEGER) AS anon_1",
)
self.assert_compile(
select(
func.sum(func.lala(table1.c.myid).label("foo")).label("bar")
),
"SELECT sum(lala(mytable.myid)) AS bar FROM mytable",
)
def test_use_labels_keyed(self):
self.assert_compile(
select(keyed), "SELECT keyed.x, keyed.y, keyed.z FROM keyed"
)
self.assert_compile(
select(keyed).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT keyed.x AS keyed_x, keyed.y AS "
"keyed_y, keyed.z AS keyed_z FROM keyed",
)
self.assert_compile(
select(
select(keyed)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT anon_1.keyed_x AS anon_1_keyed_x, "
"anon_1.keyed_y AS anon_1_keyed_y, "
"anon_1.keyed_z AS anon_1_keyed_z "
"FROM (SELECT keyed.x AS keyed_x, keyed.y AS keyed_y, "
"keyed.z AS keyed_z FROM keyed) AS anon_1",
)
def test_paramstyles(self):
stmt = text("select :foo, :bar, :bat from sometable")
self.assert_compile(
stmt,
"select ?, ?, ? from sometable",
dialect=default.DefaultDialect(paramstyle="qmark"),
)
self.assert_compile(
stmt,
"select :foo, :bar, :bat from sometable",
dialect=default.DefaultDialect(paramstyle="named"),
)
self.assert_compile(
stmt,
"select %s, %s, %s from sometable",
dialect=default.DefaultDialect(paramstyle="format"),
)
self.assert_compile(
stmt,
"select :1, :2, :3 from sometable",
dialect=default.DefaultDialect(paramstyle="numeric"),
)
self.assert_compile(
stmt,
"select %(foo)s, %(bar)s, %(bat)s from sometable",
dialect=default.DefaultDialect(paramstyle="pyformat"),
)
def test_anon_param_name_on_keys(self):
self.assert_compile(
keyed.insert(),
"INSERT INTO keyed (x, y, z) VALUES (%(colx)s, %(coly)s, %(z)s)",
dialect=default.DefaultDialect(paramstyle="pyformat"),
)
self.assert_compile(
keyed.c.coly == 5,
"keyed.y = %(coly_1)s",
checkparams={"coly_1": 5},
dialect=default.DefaultDialect(paramstyle="pyformat"),
)
def test_dupe_columns(self):
"""as of 1.4, there's no deduping."""
self.assert_compile(
select(column("a"), column("a"), column("a")),
"SELECT a, a, a",
dialect=default.DefaultDialect(),
)
c = column("a")
self.assert_compile(
select(c, c, c),
"SELECT a, a, a",
dialect=default.DefaultDialect(),
)
a, b = column("a"), column("b")
self.assert_compile(
select(a, b, b, b, a, a),
"SELECT a, b, b, b, a, a",
dialect=default.DefaultDialect(),
)
# using alternate keys.
a, b, c = (
Column("a", Integer, key="b"),
Column("b", Integer),
Column("c", Integer, key="a"),
)
self.assert_compile(
select(a, b, c, a, b, c),
"SELECT a, b, c, a, b, c",
dialect=default.DefaultDialect(),
)
self.assert_compile(
select(bindparam("a"), bindparam("b"), bindparam("c")),
"SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
dialect=default.DefaultDialect(paramstyle="named"),
)
self.assert_compile(
select(bindparam("a"), bindparam("b"), bindparam("c")),
"SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
dialect=default.DefaultDialect(paramstyle="qmark"),
)
self.assert_compile(
select(column("a"), column("a"), column("a")), "SELECT a, a, a"
)
s = select(bindparam("a"), bindparam("b"), bindparam("c"))
s = s.compile(dialect=default.DefaultDialect(paramstyle="qmark"))
eq_(s.positiontup, ["a", "b", "c"])
def test_overlapping_labels_use_labels(self):
foo = table("foo", column("id"), column("bar_id"))
foo_bar = table("foo_bar", column("id"))
stmt = select(foo, foo_bar).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
self.assert_compile(
stmt,
"SELECT foo.id AS foo_id, foo.bar_id AS foo_bar_id, "
"foo_bar.id AS foo_bar_id_1 "
"FROM foo, foo_bar",
)
def test_overlapping_labels_plus_dupes_use_labels(self):
foo = table("foo", column("id"), column("bar_id"))
foo_bar = table("foo_bar", column("id"))
# current approach is:
# 1. positional nature of columns is always maintained in all cases
# 2. two different columns that have the same label, second one
# is disambiguated
# 3. if the same column is repeated, it gets deduped using a special
# 'dedupe' label that will show two underscores
# 4. The disambiguating label generated in #2 also has to be deduped.
# 5. The derived columns, e.g. subquery().c etc. do not export the
# "dedupe" columns, at all. they are unreachable (because they
# are unreachable anyway in SQL unless you use "SELECT *")
#
# this is all new logic necessitated by #4753 since we allow columns
# to be repeated. We would still like the targeting of this column,
# both in a result set as well as in a derived selectable, to be
# unambiguous (DBs like postgresql won't let us reference an ambiguous
        # label in a derived selectable even if it's the same column repeated).
        #
        # this kind of thing happens of course because the ORM, in some more
        # exotic cases, writes joins where columns may be duped. it might be
        # nice to fix it on that side as well; however, SQLAlchemy has deduped
        # columns in SELECT statements for 13 years, so having robust behavior
        # when dupes are present is still very useful.
stmt = select(
foo.c.id,
foo.c.bar_id,
foo_bar.c.id,
foo.c.bar_id,
foo.c.id,
foo.c.bar_id,
foo_bar.c.id,
foo_bar.c.id,
).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
stmt,
"SELECT foo.id AS foo_id, "
"foo.bar_id AS foo_bar_id, " # 1. 1st foo.bar_id, as is
"foo_bar.id AS foo_bar_id_1, " # 2. 1st foo_bar.id, disamb from 1
"foo.bar_id AS foo_bar_id__1, " # 3. 2nd foo.bar_id, dedupe from 1
"foo.id AS foo_id__1, "
"foo.bar_id AS foo_bar_id__2, " # 4. 3rd foo.bar_id, dedupe again
"foo_bar.id AS foo_bar_id__3, " # 5. 2nd foo_bar.id
"foo_bar.id AS foo_bar_id__4 " # 6. 3rd foo_bar.id, dedupe again
"FROM foo, foo_bar",
)
eq_(
stmt.selected_columns.keys(),
[
"foo_id",
"foo_bar_id",
"foo_bar_id_1",
"foo_bar_id_2",
"foo_id_1",
"foo_bar_id_2",
"foo_bar_id_1",
"foo_bar_id_1",
],
)
# for the subquery, the labels created for repeated occurrences
# of the same column are not used. only the label applied to the
# first occurrence of each column is used
self.assert_compile(
select(stmt.subquery()).set_label_style(LABEL_STYLE_NONE),
"SELECT "
"anon_1.foo_id, " # from 1st foo.id in derived (line 1)
"anon_1.foo_bar_id, " # from 1st foo.bar_id in derived (line 2)
"anon_1.foo_bar_id_1, " # from 1st foo_bar.id in derived (line 3)
"anon_1.foo_bar_id, " # from 1st foo.bar_id in derived (line 2)
"anon_1.foo_id, " # from 1st foo.id in derived (line 1)
"anon_1.foo_bar_id, " # from 1st foo.bar_id in derived (line 2)
"anon_1.foo_bar_id_1, " # from 1st foo_bar.id in derived (line 3)
"anon_1.foo_bar_id_1 " # from 1st foo_bar.id in derived (line 3)
"FROM ("
"SELECT foo.id AS foo_id, "
"foo.bar_id AS foo_bar_id, " # 1. 1st foo.bar_id, as is
"foo_bar.id AS foo_bar_id_1, " # 2. 1st foo_bar.id, disamb from 1
"foo.bar_id AS foo_bar_id__1, " # 3. 2nd foo.bar_id, dedupe from 1
"foo.id AS foo_id__1, "
"foo.bar_id AS foo_bar_id__2, " # 4. 3rd foo.bar_id, dedupe again
"foo_bar.id AS foo_bar_id__3, " # 5. 2nd foo_bar.id
"foo_bar.id AS foo_bar_id__4 " # 6. 3rd foo_bar.id, dedupe again
"FROM foo, foo_bar"
") AS anon_1",
)
def test_overlapping_labels_plus_dupes_separate_keys_use_labels(self):
"""test a condition related to #6710.
        Prior to this issue, CTE used selected_columns to render the
        "WITH RECURSIVE (colnames)" part. This test shows that this isn't
        correct when keys are present.
See also test_cte ->
test_wrecur_ovlp_lbls_plus_dupes_separate_keys_use_labels
"""
m = MetaData()
foo = Table(
"foo",
m,
Column("id", Integer),
Column("bar_id", Integer, key="bb"),
)
foo_bar = Table("foo_bar", m, Column("id", Integer, key="bb"))
stmt = select(
foo.c.id,
foo.c.bb,
foo_bar.c.bb,
foo.c.bb,
foo.c.id,
foo.c.bb,
foo_bar.c.bb,
foo_bar.c.bb,
).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
# note these keys are not what renders in the SQL. These keys
# will be addressable in the result set but can't be used in
# rendering, such as for a CTE
eq_(
stmt.selected_columns.keys(),
[
"foo_id",
"foo_bb",
"foo_bar_bb",
"foo_bb_1",
"foo_id_1",
"foo_bb_1",
"foo_bar_bb_1",
"foo_bar_bb_1",
],
)
eq_(
stmt.subquery().c.keys(),
[
"foo_id",
"foo_bb",
"foo_bar_bb",
"foo_bb_1",
"foo_id_1",
"foo_bb_1",
"foo_bar_bb_1",
"foo_bar_bb_1",
],
)
self.assert_compile(
stmt,
"SELECT foo.id AS foo_id, "
"foo.bar_id AS foo_bar_id, " # 1. 1st foo.bar_id, as is
"foo_bar.id AS foo_bar_id_1, " # 2. 1st foo_bar.id, disamb from 1
"foo.bar_id AS foo_bar_id__1, " # 3. 2nd foo.bar_id, dedupe from 1
"foo.id AS foo_id__1, "
"foo.bar_id AS foo_bar_id__2, " # 4. 3rd foo.bar_id, dedupe again
"foo_bar.id AS foo_bar_id__3, " # 5. 2nd foo_bar.id
"foo_bar.id AS foo_bar_id__4 " # 6. 3rd foo_bar.id, dedupe again
"FROM foo, foo_bar",
)
def test_dupe_columns_use_labels(self):
t = table("t", column("a"), column("b"))
self.assert_compile(
select(t.c.a, t.c.a, t.c.b, t.c.a).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT t.a AS t_a, t.a AS t_a__1, t.b AS t_b, "
"t.a AS t_a__2 FROM t",
)
def test_dupe_columns_use_labels_derived_selectable(self):
t = table("t", column("a"), column("b"))
stmt = (
select(t.c.a, t.c.a, t.c.b, t.c.a)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
self.assert_compile(
select(stmt).set_label_style(LABEL_STYLE_NONE),
"SELECT anon_1.t_a, anon_1.t_a, anon_1.t_b, anon_1.t_a FROM "
"(SELECT t.a AS t_a, t.a AS t_a__1, t.b AS t_b, t.a AS t_a__2 "
"FROM t) AS anon_1",
)
def test_dupe_columns_use_labels_mix_annotations(self):
t = table("t", column("a"), column("b"))
a, b, a_a = t.c.a, t.c.b, t.c.a._annotate({"some_orm_thing": True})
self.assert_compile(
select(a, a_a, b, a_a).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT t.a AS t_a, t.a AS t_a__1, t.b AS t_b, "
"t.a AS t_a__2 FROM t",
)
self.assert_compile(
select(a_a, a, b, a_a).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT t.a AS t_a, t.a AS t_a__1, t.b AS t_b, "
"t.a AS t_a__2 FROM t",
)
self.assert_compile(
select(a_a, a_a, b, a).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT t.a AS t_a, t.a AS t_a__1, t.b AS t_b, "
"t.a AS t_a__2 FROM t",
)
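# hedged takeaway from the three permutations above: a column returned by
# _annotate() compiles identically to, and dedupes against, the original
# column, so mixing annotated and plain references does not change the
# labeling outcome.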
def test_dupe_columns_use_labels_derived_selectable_mix_annotations(self):
t = table("t", column("a"), column("b"))
a, b, a_a = t.c.a, t.c.b, t.c.a._annotate({"some_orm_thing": True})
stmt = (
select(a, a_a, b, a_a)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
self.assert_compile(
select(stmt).set_label_style(LABEL_STYLE_NONE),
"SELECT anon_1.t_a, anon_1.t_a, anon_1.t_b, anon_1.t_a FROM "
"(SELECT t.a AS t_a, t.a AS t_a__1, t.b AS t_b, t.a AS t_a__2 "
"FROM t) AS anon_1",
)
def test_overlapping_labels_plus_dupes_use_labels_mix_annotations(self):
foo = table("foo", column("id"), column("bar_id"))
foo_bar = table("foo_bar", column("id"))
foo_bar__id = foo_bar.c.id._annotate({"some_orm_thing": True})
stmt = select(
foo.c.bar_id,
foo_bar.c.id,
foo_bar.c.id,
foo_bar__id,
foo_bar__id,
).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
stmt,
"SELECT foo.bar_id AS foo_bar_id, foo_bar.id AS foo_bar_id_1, "
"foo_bar.id AS foo_bar_id__1, foo_bar.id AS foo_bar_id__2, "
"foo_bar.id AS foo_bar_id__3 FROM foo, foo_bar",
)
def test_dupe_columns_use_labels_from_anon(self):
t = table("t", column("a"), column("b"))
a = t.alias()
# second and third occurrences of a.c.a are labeled, but are
# dupes of each other.
self.assert_compile(
select(a.c.a, a.c.a, a.c.b, a.c.a).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT t_1.a AS t_1_a, t_1.a AS t_1_a__1, t_1.b AS t_1_b, "
"t_1.a AS t_1_a__2 "
"FROM t AS t_1",
)
def test_nested_label_targeting(self):
"""test nested anonymous label generation."""
s1 = table1.select()
s2 = s1.alias()
s3 = select(s2).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
s4 = s3.alias()
s5 = select(s4).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
s5,
"SELECT anon_1.anon_2_myid AS "
"anon_1_anon_2_myid, anon_1.anon_2_name AS "
"anon_1_anon_2_name, anon_1.anon_2_descript"
"ion AS anon_1_anon_2_description FROM "
"(SELECT anon_2.myid AS anon_2_myid, "
"anon_2.name AS anon_2_name, "
"anon_2.description AS anon_2_description "
"FROM (SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable) AS anon_2) "
"AS anon_1",
)
def test_nested_label_targeting_keyed(self):
s1 = keyed.select()
s2 = s1.alias()
s3 = select(s2).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
s3,
"SELECT anon_1.x AS anon_1_x, "
"anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM "
"(SELECT keyed.x AS x, keyed.y "
"AS y, keyed.z AS z FROM keyed) AS anon_1",
)
s4 = s3.alias()
s5 = select(s4).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
s5,
"SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
"anon_1.anon_2_y AS anon_1_anon_2_y, "
"anon_1.anon_2_z AS anon_1_anon_2_z "
"FROM (SELECT anon_2.x AS anon_2_x, "
"anon_2.y AS anon_2_y, "
"anon_2.z AS anon_2_z FROM "
"(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
"AS z FROM keyed) AS anon_2) AS anon_1",
)
@testing.combinations("per cent", "per % cent", "%percent")
def test_percent_names_collide_with_anonymizing(self, name):
table1 = table("t1", column(name))
jj = select(table1.c[name]).subquery()
jjj = join(table1, jj, table1.c[name] == jj.c[name])
j2 = (
jjj.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery("foo")
)
self.assert_compile(
j2.select(),
'SELECT foo."t1_%(name)s", foo."anon_1_%(name)s" FROM '
'(SELECT t1."%(name)s" AS "t1_%(name)s", anon_1."%(name)s" '
'AS "anon_1_%(name)s" FROM t1 JOIN (SELECT t1."%(name)s" AS '
'"%(name)s" FROM t1) AS anon_1 ON t1."%(name)s" = '
'anon_1."%(name)s") AS foo' % {"name": name},
)
def test_exists(self):
s = select(table1.c.myid).where(table1.c.myid == 5)
self.assert_compile(
exists(s),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)",
)
self.assert_compile(
exists(s.scalar_subquery()),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)",
)
self.assert_compile(
exists(table1.c.myid).where(table1.c.myid == 5).select(),
"SELECT EXISTS (SELECT mytable.myid FROM "
"mytable WHERE mytable.myid = :myid_1) AS anon_1",
params={"mytable_myid": 5},
)
self.assert_compile(
select(table1, exists(1).select_from(table2)),
"SELECT mytable.myid, mytable.name, "
"mytable.description, EXISTS (SELECT 1 "
"FROM myothertable) AS anon_1 FROM mytable",
params={},
)
self.assert_compile(
select(table1, exists(1).select_from(table2).label("foo")),
"SELECT mytable.myid, mytable.name, "
"mytable.description, EXISTS (SELECT 1 "
"FROM myothertable) AS foo FROM mytable",
params={},
)
self.assert_compile(
table1.select().where(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
self.assert_compile(
table1.select().where(
exists()
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
self.assert_compile(
select(
or_(
exists().where(table2.c.otherid == "foo"),
exists().where(table2.c.otherid == "bar"),
)
),
"SELECT (EXISTS (SELECT * FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)) "
"OR (EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = :otherid_2)) AS anon_1",
)
self.assert_compile(
select(exists(1)), "SELECT EXISTS (SELECT 1) AS anon_1"
)
self.assert_compile(
select(~exists(1)), "SELECT NOT (EXISTS (SELECT 1)) AS anon_1"
)
self.assert_compile(
select(~(~exists(1))),
"SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1",
)
self.assert_compile(
exists(42)
.select_from(table1)
.where(table1.c.name == "foo", table1.c.description == "bar"),
"EXISTS (SELECT 42 FROM mytable WHERE mytable.name = :name_1 "
"AND mytable.description = :description_1)",
)
def test_exists_method(self):
subq = (
select(func.count(table2.c.otherid))
.where(table2.c.otherid == table1.c.myid)
.correlate(table1)
.group_by(table2.c.otherid)
.having(func.count(table2.c.otherid) > 1)
.exists()
)
self.assert_compile(
table1.select().where(subq),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE EXISTS (SELECT count(myothertable.otherid) "
"AS count_1 FROM myothertable WHERE myothertable.otherid = "
"mytable.myid GROUP BY myothertable.otherid "
"HAVING count(myothertable.otherid) > :count_2)",
)
def test_where_subquery(self):
s = (
select(addresses.c.street)
.where(addresses.c.user_id == users.c.user_id)
.alias("s")
)
# don't correlate in a FROM list
self.assert_compile(
select(users, s.c.street).select_from(s),
"SELECT users.user_id, users.user_name, users.password, s.street "
"FROM (SELECT addresses.street AS street FROM addresses, users "
"WHERE addresses.user_id = users.user_id) AS s, users",
)
self.assert_compile(
table1.select().where(
table1.c.myid
== select(table1.c.myid)
.where(table1.c.name == "jack")
.scalar_subquery()
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"mytable.myid = (SELECT mytable.myid FROM "
"mytable WHERE mytable.name = :name_1)",
)
self.assert_compile(
table1.select().where(
table1.c.myid
== select(table2.c.otherid)
.where(table1.c.name == table2.c.othername)
.scalar_subquery()
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"mytable.myid = (SELECT "
"myothertable.otherid FROM myothertable "
"WHERE mytable.name = myothertable.othernam"
"e)",
)
self.assert_compile(
table1.select().where(
exists(1).where(table2.c.otherid == table1.c.myid)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT 1 FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
talias = table1.alias("ta")
s = (
select(talias)
.where(exists(1).where(table2.c.otherid == talias.c.myid))
.subquery("sq2")
)
self.assert_compile(
select(s, table1),
"SELECT sq2.myid, sq2.name, "
"sq2.description, mytable.myid AS myid_1, "
"mytable.name AS name_1, "
"mytable.description AS description_1 FROM "
"(SELECT ta.myid AS myid, ta.name AS name, "
"ta.description AS description FROM "
"mytable AS ta WHERE EXISTS (SELECT 1 FROM "
"myothertable WHERE myothertable.otherid = "
"ta.myid)) AS sq2, mytable",
)
# test constructing the outer query by appending columns after the
# fact (here via the non-generative form of add_columns()), which
# occurs in the ORM's Query object
s = (
select()
.where(exists(1).where(table2.c.otherid == table1.c.myid))
.select_from(table1)
)
s.add_columns.non_generative(s, table1)
self.assert_compile(
s,
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable WHERE "
"EXISTS (SELECT 1 FROM myothertable WHERE "
"myothertable.otherid = mytable.myid)",
)
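# hedged note: s.add_columns.non_generative(s, table1) mutates the
# statement in place; the generative spelling would be roughly
# s = s.add_columns(table1). the in-place form is used here (assumption)
# to mirror how the ORM historically appended columns to an existing
# statement.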
def test_orderby_subquery(self):
self.assert_compile(
table1.select().order_by(
select(table2.c.otherid)
.where(table1.c.myid == table2.c.otherid)
.scalar_subquery()
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY "
"(SELECT myothertable.otherid FROM "
"myothertable WHERE mytable.myid = "
"myothertable.otherid)",
)
self.assert_compile(
table1.select().order_by(
desc(
select(table2.c.otherid)
.where(table1.c.myid == table2.c.otherid)
.scalar_subquery()
)
),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY "
"(SELECT myothertable.otherid FROM "
"myothertable WHERE mytable.myid = "
"myothertable.otherid) DESC",
)
def test_scalar_select(self):
s = select(table1.c.myid).correlate(None).scalar_subquery()
self.assert_compile(
select(table1, s),
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT mytable.myid "
"FROM mytable) AS anon_1 FROM mytable",
)
s = select(table1.c.myid).scalar_subquery()
self.assert_compile(
select(table2, s),
"SELECT myothertable.otherid, "
"myothertable.othername, (SELECT "
"mytable.myid FROM mytable) AS anon_1 FROM "
"myothertable",
)
s = select(table1.c.myid).correlate(None).scalar_subquery()
self.assert_compile(
select(table1, s),
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT mytable.myid "
"FROM mytable) AS anon_1 FROM mytable",
)
s = select(table1.c.myid).scalar_subquery()
s2 = s.where(table1.c.myid == 5)
self.assert_compile(
s2,
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)",
)
self.assert_compile(s, "(SELECT mytable.myid FROM mytable)")
# test that scalar subqueries can be used directly in an explicitly
# scalar (comparison) context
s = select(table1.c.myid).scalar_subquery()
self.assert_compile(
select(table1.c.myid).where(table1.c.myid == s),
"SELECT mytable.myid FROM mytable WHERE "
"mytable.myid = (SELECT mytable.myid FROM "
"mytable)",
)
self.assert_compile(
select(table1.c.myid).where(table1.c.myid < s),
"SELECT mytable.myid FROM mytable WHERE "
"mytable.myid < (SELECT mytable.myid FROM "
"mytable)",
)
s = select(table1.c.myid).scalar_subquery()
self.assert_compile(
select(table2, s),
"SELECT myothertable.otherid, "
"myothertable.othername, (SELECT "
"mytable.myid FROM mytable) AS anon_1 FROM "
"myothertable",
)
# test expressions against scalar selects
self.assert_compile(
select(s - literal(8)),
"SELECT (SELECT mytable.myid FROM mytable) "
"- :param_1 AS anon_1",
)
self.assert_compile(
select(select(table1.c.name).scalar_subquery() + literal("x")),
"SELECT (SELECT mytable.name FROM mytable) "
"|| :param_1 AS anon_1",
)
self.assert_compile(
select(s > literal(8)),
"SELECT (SELECT mytable.myid FROM mytable) "
"> :param_1 AS anon_1",
)
self.assert_compile(
select(select(table1.c.name).label("foo")),
"SELECT (SELECT mytable.name FROM mytable) AS foo",
)
# scalar selects should not have any attributes on their 'c' or
# 'columns' attribute
s = select(table1.c.myid).scalar_subquery()
assert_raises_message(
exc.InvalidRequestError,
"Scalar Select expression has no columns; use this "
"object directly within a column-level expression.",
lambda: s.c.foo,
)
assert_raises_message(
exc.InvalidRequestError,
"Scalar Select expression has no columns; use this "
"object directly within a column-level expression.",
lambda: s.columns.foo,
)
zips = table(
"zips", column("zipcode"), column("latitude"), column("longitude")
)
places = table("places", column("id"), column("nm"))
zipcode = "12345"
qlat = (
select(zips.c.latitude)
.where(zips.c.zipcode == zipcode)
.correlate(None)
.scalar_subquery()
)
qlng = (
select(zips.c.longitude)
.where(zips.c.zipcode == zipcode)
.correlate(None)
.scalar_subquery()
)
q = (
select(
places.c.id,
places.c.nm,
zips.c.zipcode,
func.latlondist(qlat, qlng).label("dist"),
)
.where(zips.c.zipcode == zipcode)
.order_by("dist", places.c.nm)
)
self.assert_compile(
q,
"SELECT places.id, places.nm, "
"zips.zipcode, latlondist((SELECT "
"zips.latitude FROM zips WHERE "
"zips.zipcode = :zipcode_1), (SELECT "
"zips.longitude FROM zips WHERE "
"zips.zipcode = :zipcode_2)) AS dist FROM "
"places, zips WHERE zips.zipcode = "
":zipcode_3 ORDER BY dist, places.nm",
)
zalias = zips.alias("main_zip")
qlat = (
select(zips.c.latitude)
.where(zips.c.zipcode == zalias.c.zipcode)
.scalar_subquery()
)
qlng = (
select(zips.c.longitude)
.where(zips.c.zipcode == zalias.c.zipcode)
.scalar_subquery()
)
q = select(
places.c.id,
places.c.nm,
zalias.c.zipcode,
func.latlondist(qlat, qlng).label("dist"),
).order_by("dist", places.c.nm)
self.assert_compile(
q,
"SELECT places.id, places.nm, "
"main_zip.zipcode, latlondist((SELECT "
"zips.latitude FROM zips WHERE "
"zips.zipcode = main_zip.zipcode), (SELECT "
"zips.longitude FROM zips WHERE "
"zips.zipcode = main_zip.zipcode)) AS dist "
"FROM places, zips AS main_zip ORDER BY "
"dist, places.nm",
)
a1 = table2.alias("t2alias")
s1 = (
select(a1.c.otherid)
.where(table1.c.myid == a1.c.otherid)
.scalar_subquery()
)
j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
s2 = select(table1, s1).select_from(j1)
self.assert_compile(
s2,
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT "
"t2alias.otherid FROM myothertable AS "
"t2alias WHERE mytable.myid = "
"t2alias.otherid) AS anon_1 FROM mytable "
"JOIN myothertable ON mytable.myid = "
"myothertable.otherid",
)
def test_label_comparison_one(self):
x = func.lala(table1.c.myid).label("foo")
self.assert_compile(
select(x).where(x == 5),
"SELECT lala(mytable.myid) AS foo FROM "
"mytable WHERE lala(mytable.myid) = "
":param_1",
)
def test_label_comparison_two(self):
self.assert_compile(
label("bar", column("foo", type_=String)) + "foo",
"foo || :param_1",
)
def test_order_by_labels_enabled_negative_cases(self):
"""test order_by_labels enabled but the cases where we expect
ORDER BY the expression without the label name"""
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
# binary expressions render as the expression without labels
self.assert_compile(
select(lab1, lab2).order_by(lab1 + "test"),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1 + :param_1",
dialect=dialect,
)
# labels within functions in the columns clause render
# with the expression
self.assert_compile(
select(lab1, func.foo(lab1)).order_by(lab1, func.foo(lab1)),
"SELECT mytable.myid + :myid_1 AS foo, "
"foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
"ORDER BY foo, foo(mytable.myid + :myid_1)",
dialect=dialect,
)
# here, 'name' is implicitly available, but w/ #3882 we don't
# want to render a name that isn't specifically a Label elsewhere
# in the query
self.assert_compile(
select(table1.c.myid).order_by(table1.c.name.label("name")),
"SELECT mytable.myid FROM mytable ORDER BY mytable.name",
)
# as well as if it doesn't match
self.assert_compile(
select(table1.c.myid).order_by(
func.lower(table1.c.name).label("name")
),
"SELECT mytable.myid FROM mytable ORDER BY lower(mytable.name)",
)
@testing.combinations(
(desc, "DESC"),
(asc, "ASC"),
(nulls_first, "NULLS FIRST"),
(nulls_last, "NULLS LAST"),
(nullsfirst, "NULLS FIRST"),
(nullslast, "NULLS LAST"),
(lambda c: c.desc().nulls_last(), "DESC NULLS LAST"),
(lambda c: c.desc().nullslast(), "DESC NULLS LAST"),
(lambda c: c.nulls_first().asc(), "NULLS FIRST ASC"),
)
def test_order_by_labels_enabled(self, operator, expected):
"""test positive cases with order_by_labels enabled. this is
multiplied out to all the ORDER BY modifier operators
(see #11592)
"""
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
self.assert_compile(
select(lab1, lab2).order_by(lab1, operator(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
f"ORDER BY foo, bar {expected}",
dialect=dialect,
)
# the function embedded label renders as the function
self.assert_compile(
select(lab1, lab2).order_by(func.hoho(lab1), operator(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
f"ORDER BY hoho(mytable.myid + :myid_1), bar {expected}",
dialect=dialect,
)
lx = (table1.c.myid + table1.c.myid).label("lx")
ly = (func.lower(table1.c.name) + table1.c.description).label("ly")
self.assert_compile(
select(lx, ly).order_by(lx, operator(ly)),
"SELECT mytable.myid + mytable.myid AS lx, "
"lower(mytable.name) || mytable.description AS ly "
f"FROM mytable ORDER BY lx, ly {expected}",
dialect=dialect,
)
# expression isn't actually the same thing (even though label is)
self.assert_compile(
select(lab1, lab2).order_by(
table1.c.myid.label("foo"),
operator(table1.c.name.label("bar")),
),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
f"ORDER BY mytable.myid, mytable.name {expected}",
dialect=dialect,
)
# it's also an exact match, not aliased etc.
self.assert_compile(
select(lab1, lab2).order_by(
operator(table1.alias().c.name.label("bar"))
),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
f"ORDER BY mytable_1.name {expected}",
dialect=dialect,
)
# but! it's based on lineage
lab2_lineage = lab2.element._clone()
self.assert_compile(
select(lab1, lab2).order_by(operator(lab2_lineage.label("bar"))),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
f"ORDER BY bar {expected}",
dialect=dialect,
)
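# informal recap of the lineage case above: lab2_lineage is a _clone()
# of lab2's underlying expression, so even though a brand-new Label
# object is passed to order_by(), it shares lineage with the "bar" label
# in the columns clause and therefore renders as "ORDER BY bar".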
def test_order_by_labels_disabled(self):
"""test when the order_by_labels feature is disabled entirely"""
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
dialect.supports_simple_order_by_label = False
self.assert_compile(
select(lab1, lab2).order_by(lab1, desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
dialect=dialect,
)
self.assert_compile(
select(lab1, lab2).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), "
"somefunc(mytable.name) DESC",
dialect=dialect,
)
def test_no_group_by_labels(self):
lab1 = (table1.c.myid + 12).label("foo")
lab2 = func.somefunc(table1.c.name).label("bar")
dialect = default.DefaultDialect()
self.assert_compile(
select(lab1, lab2).group_by(lab1, lab2),
"SELECT mytable.myid + :myid_1 AS foo, somefunc(mytable.name) "
"AS bar FROM mytable GROUP BY mytable.myid + :myid_1, "
"somefunc(mytable.name)",
dialect=dialect,
)
def test_conjunctions(self):
a, b, c = text("a"), text("b"), text("c")
x = and_(a, b, c)
assert isinstance(x.type, Boolean)
assert str(x) == "a AND b AND c"
self.assert_compile(
select(x.label("foo")), "SELECT a AND b AND c AS foo"
)
self.assert_compile(
and_(
table1.c.myid == 12,
table1.c.name == "asdf",
table2.c.othername == "foo",
text("sysdate() = today()"),
),
"mytable.myid = :myid_1 AND mytable.name = :name_1 "
"AND myothertable.othername = "
":othername_1 AND sysdate() = today()",
)
self.assert_compile(
and_(
table1.c.myid == 12,
or_(
table2.c.othername == "asdf",
table2.c.othername == "foo",
table2.c.otherid == 9,
),
text("sysdate() = today()"),
),
"mytable.myid = :myid_1 AND (myothertable.othername = "
":othername_1 OR myothertable.othername = :othername_2 OR "
"myothertable.otherid = :otherid_1) AND sysdate() = "
"today()",
checkparams={
"othername_1": "asdf",
"othername_2": "foo",
"otherid_1": 9,
"myid_1": 12,
},
)
# test a generator
self.assert_compile(
and_(
conj for conj in [table1.c.myid == 12, table1.c.name == "asdf"]
),
"mytable.myid = :myid_1 AND mytable.name = :name_1",
)
def test_nested_conjunctions_short_circuit(self):
"""test that empty or_(), and_() conjunctions are collapsed by
an enclosing conjunction."""
t = table("t", column("x"))
self.assert_compile(
select(t).where(and_(t.c.x == 5, or_(and_(or_(t.c.x == 7))))),
"SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2",
)
self.assert_compile(
select(t).where(and_(or_(t.c.x == 12, and_(or_(t.c.x == 8))))),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
)
self.assert_compile(
select(t).where(
and_(or_(or_(t.c.x == 12), and_(or_(and_(t.c.x == 8)))))
),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
)
self.assert_compile(
select(t).where(
and_(
or_(
or_(t.c.x == 12),
and_(
BooleanClauseList._construct_raw(operators.or_),
or_(and_(t.c.x == 8)),
BooleanClauseList._construct_raw(operators.and_),
),
)
)
),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
)
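# informal sketch of the collapse behavior tested above: an "empty"
# conjunction such as BooleanClauseList._construct_raw(operators.or_)
# contributes no clauses, so the enclosing and_() / or_() presumably just
# drops it and flattens single-element groups, e.g. and_(t.c.x == 5,
# <empty or_>) would compile to just "t.x = :x_1". (illustrative
# paraphrase of the assertions, not additional coverage)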
def test_true_short_circuit(self):
t = table("t", column("x"))
self.assert_compile(
select(t).where(true()),
"SELECT t.x FROM t WHERE 1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
self.assert_compile(
select(t).where(true()),
"SELECT t.x FROM t WHERE true",
dialect=default.DefaultDialect(supports_native_boolean=True),
)
self.assert_compile(
select(t),
"SELECT t.x FROM t",
dialect=default.DefaultDialect(supports_native_boolean=True),
)
def test_distinct(self):
self.assert_compile(
select(table1.c.myid.distinct()),
"SELECT DISTINCT mytable.myid FROM mytable",
)
self.assert_compile(
select(distinct(table1.c.myid)),
"SELECT DISTINCT mytable.myid FROM mytable",
)
self.assert_compile(
select(distinct(table1.c.myid)).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT DISTINCT mytable.myid FROM mytable",
)
# the bug fixed here as part of #6008 is the same bug that's
# in 1.3 as well, producing
# "SELECT anon_2.anon_1 FROM (SELECT distinct mytable.myid
# FROM mytable) AS anon_2"
self.assert_compile(
select(select(distinct(table1.c.myid)).subquery()),
"SELECT anon_2.anon_1 FROM (SELECT "
"DISTINCT mytable.myid AS anon_1 FROM mytable) AS anon_2",
)
self.assert_compile(
select(table1.c.myid).distinct(),
"SELECT DISTINCT mytable.myid FROM mytable",
)
self.assert_compile(
select(func.count(table1.c.myid.distinct())),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable",
)
self.assert_compile(
select(func.count(distinct(table1.c.myid))),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable",
)
def test_distinct_on(self):
with testing.expect_deprecated(
"Passing expression to",
"DISTINCT ON is currently supported only by the PostgreSQL "
"dialect",
):
select("*").distinct(table1.c.myid).compile()
def test_where_empty(self):
self.assert_compile(
select(table1.c.myid).where(
BooleanClauseList._construct_raw(operators.and_)
),
"SELECT mytable.myid FROM mytable",
)
self.assert_compile(
select(table1.c.myid).where(
BooleanClauseList._construct_raw(operators.or_)
),
"SELECT mytable.myid FROM mytable",
)
def test_where_multiple(self):
self.assert_compile(
select(table1.c.myid).where(
table1.c.myid == 12, table1.c.name == "foobar"
),
"SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1 "
"AND mytable.name = :name_1",
)
def test_order_by_nulls(self):
self.assert_compile(
table2.select().order_by(
table2.c.otherid,
table2.c.othername.desc().nulls_first(),
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS FIRST",
)
self.assert_compile(
table2.select().order_by(
table2.c.otherid,
table2.c.othername.desc().nulls_last(),
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS LAST",
)
self.assert_compile(
table2.select().order_by(
table2.c.otherid.nulls_last(),
table2.c.othername.desc().nulls_first(),
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS LAST, "
"myothertable.othername DESC NULLS FIRST",
)
self.assert_compile(
table2.select().order_by(
table2.c.otherid.nulls_first(),
table2.c.othername.desc(),
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC",
)
self.assert_compile(
table2.select().order_by(
table2.c.otherid.nulls_first(),
table2.c.othername.desc().nulls_last(),
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC NULLS LAST",
)
def test_orderby_groupby(self):
self.assert_compile(
table2.select().order_by(
table2.c.otherid, asc(table2.c.othername)
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername ASC",
)
self.assert_compile(
table2.select().order_by(
table2.c.otherid, table2.c.othername.desc()
),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC",
)
# generative order_by
self.assert_compile(
table2.select()
.order_by(table2.c.otherid)
.order_by(table2.c.othername.desc()),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC",
)
self.assert_compile(
table2.select()
.order_by(table2.c.otherid)
.order_by(table2.c.othername.desc())
.order_by(None),
"SELECT myothertable.otherid, myothertable.othername "
"FROM myothertable",
)
self.assert_compile(
select(table2.c.othername, func.count(table2.c.otherid)).group_by(
table2.c.othername
),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername",
)
# generative group by
self.assert_compile(
select(table2.c.othername, func.count(table2.c.otherid)).group_by(
table2.c.othername
),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername",
)
self.assert_compile(
select(table2.c.othername, func.count(table2.c.otherid))
.group_by(table2.c.othername)
.group_by(None),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable",
)
self.assert_compile(
select(table2.c.othername, func.count(table2.c.otherid))
.group_by(table2.c.othername)
.order_by(table2.c.othername),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable "
"GROUP BY myothertable.othername ORDER BY myothertable.othername",
)
def test_custom_order_by_clause(self):
class CustomCompiler(PGCompiler):
def order_by_clause(self, select, **kw):
return super().order_by_clause(select, **kw) + " CUSTOMIZED"
class CustomDialect(PGDialect):
name = "custom"
statement_compiler = CustomCompiler
stmt = select(table1.c.myid).order_by(table1.c.myid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable ORDER BY "
"mytable.myid CUSTOMIZED",
dialect=CustomDialect(),
)
def test_custom_group_by_clause(self):
class CustomCompiler(PGCompiler):
def group_by_clause(self, select, **kw):
return super().group_by_clause(select, **kw) + " CUSTOMIZED"
class CustomDialect(PGDialect):
name = "custom"
statement_compiler = CustomCompiler
stmt = select(table1.c.myid).group_by(table1.c.myid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable GROUP BY "
"mytable.myid CUSTOMIZED",
dialect=CustomDialect(),
)
def test_for_update(self):
self.assert_compile(
table1.select().where(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
# nowait is not supported by the default dialect; should just render
# plain FOR UPDATE
self.assert_compile(
table1.select()
.where(table1.c.myid == 7)
.with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
def test_alias(self):
# test an alias of table1. column names stay the same,
# table name "changes" to "foo".
self.assert_compile(
select(table1.alias("foo")),
"SELECT foo.myid, foo.name, foo.description FROM mytable AS foo",
)
for dialect in (oracle.dialect(),):
self.assert_compile(
select(table1.alias("foo")),
"SELECT foo.myid, foo.name, foo.description FROM mytable foo",
dialect=dialect,
)
self.assert_compile(
select(table1.alias()),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1",
)
# create a select for a join of two tables. the tablename-plus-column
# label style means the column names will have labels of the form
# tablename_columnname, which become the column keys accessible off
# the Selectable object. also, only use one column from the second
# table and all columns from the first table, table1.
q = (
select(table1, table2.c.otherid)
.where(table1.c.myid == table2.c.otherid)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
# make an alias of the "selectable". column names
# stay the same (i.e. the labels), table name "changes" to "t2view".
a = q.alias("t2view")
# select from that alias, also using labels. two levels of labels
# should produce two underscores.
# also, reference the column "mytable_myid" off of the t2view alias.
self.assert_compile(
a.select()
.where(a.c.mytable_myid == 9)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT t2view.mytable_myid AS t2view_mytable_myid, "
"t2view.mytable_name "
"AS t2view_mytable_name, "
"t2view.mytable_description AS t2view_mytable_description, "
"t2view.myothertable_otherid AS t2view_myothertable_otherid FROM "
"(SELECT mytable.myid AS mytable_myid, "
"mytable.name AS mytable_name, "
"mytable.description AS mytable_description, "
"myothertable.otherid AS "
"myothertable_otherid FROM mytable, myothertable "
"WHERE mytable.myid = "
"myothertable.otherid) AS t2view "
"WHERE t2view.mytable_myid = :mytable_myid_1",
)
def test_alias_nesting_table(self):
self.assert_compile(
select(table1.alias("foo").alias("bar").alias("bat")),
"SELECT bat.myid, bat.name, bat.description FROM mytable AS bat",
)
self.assert_compile(
select(table1.alias(None).alias("bar").alias("bat")),
"SELECT bat.myid, bat.name, bat.description FROM mytable AS bat",
)
self.assert_compile(
select(table1.alias("foo").alias(None).alias("bat")),
"SELECT bat.myid, bat.name, bat.description FROM mytable AS bat",
)
self.assert_compile(
select(table1.alias("foo").alias("bar").alias(None)),
"SELECT bar_1.myid, bar_1.name, bar_1.description "
"FROM mytable AS bar_1",
)
self.assert_compile(
select(table1.alias("foo").alias(None).alias(None)),
"SELECT anon_1.myid, anon_1.name, anon_1.description "
"FROM mytable AS anon_1",
)
def test_alias_nesting_subquery(self):
stmt = select(table1).subquery()
self.assert_compile(
select(stmt.alias("foo").alias("bar").alias("bat")),
"SELECT bat.myid, bat.name, bat.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS bat",
)
self.assert_compile(
select(stmt.alias("foo").alias(None).alias(None)),
"SELECT anon_1.myid, anon_1.name, anon_1.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS anon_1",
)
def test_prefix(self):
self.assert_compile(
table1.select()
.prefix_with("SQL_CALC_FOUND_ROWS")
.prefix_with("SQL_SOME_WEIRD_MYSQL_THING"),
"SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
)
def test_prefix_dialect_specific(self):
self.assert_compile(
table1.select()
.prefix_with("SQL_CALC_FOUND_ROWS", dialect="sqlite")
.prefix_with("SQL_SOME_WEIRD_MYSQL_THING", dialect="mysql"),
"SELECT SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
dialect=mysql.dialect(),
)
def test_collate(self):
# columns clause
self.assert_compile(
select(column("x").collate("bar")),
"SELECT x COLLATE bar AS anon_1",
)
# WHERE clause
self.assert_compile(
select(column("x")).where(column("x").collate("bar") == "foo"),
"SELECT x WHERE (x COLLATE bar) = :param_1",
)
# ORDER BY clause
self.assert_compile(
select(column("x")).order_by(column("x").collate("bar")),
"SELECT x ORDER BY x COLLATE bar",
)
def test_literal(self):
self.assert_compile(
select(literal("foo")), "SELECT :param_1 AS anon_1"
)
self.assert_compile(
select(literal("foo") + literal("bar")).select_from(table1),
"SELECT :param_1 || :param_2 AS anon_1 FROM mytable",
)
def test_calculated_columns(self):
value_tbl = table(
"values",
column("id", Integer),
column("val1", Float),
column("val2", Float),
)
self.assert_compile(
select(
value_tbl.c.id,
(value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1,
),
"SELECT values.id, (values.val2 - values.val1) "
"/ CAST(values.val1 AS FLOAT) AS anon_1 FROM values",
)
self.assert_compile(
select(value_tbl.c.id).where(
(value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1 > 2.0,
),
"SELECT values.id FROM values WHERE "
"(values.val2 - values.val1) / "
"CAST(values.val1 AS FLOAT) > :param_1",
)
self.assert_compile(
select(value_tbl.c.id).where(
value_tbl.c.val1
/ (value_tbl.c.val2 - value_tbl.c.val1)
/ value_tbl.c.val1
> 2.0,
),
"SELECT values.id FROM values WHERE "
"(values.val1 / CAST((values.val2 - values.val1) AS FLOAT)) "
"/ CAST(values.val1 AS FLOAT) > :param_1",
)
def test_percent_chars(self):
t = table(
"table%name",
column("percent%"),
column("%(oneofthese)s"),
column("spaces % more spaces"),
)
self.assert_compile(
t.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"""SELECT "table%name"."percent%" AS "table%name_percent%", """
""""table%name"."%(oneofthese)s" AS """
""""table%name_%(oneofthese)s", """
""""table%name"."spaces % more spaces" AS """
""""table%name_spaces % """
'''more spaces" FROM "table%name"''',
)
def test_joins(self):
self.assert_compile(
join(table2, table1, table1.c.myid == table2.c.otherid).select(),
"SELECT myothertable.otherid, myothertable.othername, "
"mytable.myid, mytable.name, mytable.description FROM "
"myothertable JOIN mytable ON mytable.myid = myothertable.otherid",
)
self.assert_compile(
select(table1).select_from(
join(table1, table2, table1.c.myid == table2.c.otherid)
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable JOIN myothertable ON mytable.myid = myothertable.otherid",
)
self.assert_compile(
select(
join(
join(table1, table2, table1.c.myid == table2.c.otherid),
table3,
table1.c.myid == table3.c.userid,
)
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid, "
"thirdtable.otherstuff FROM mytable JOIN myothertable "
"ON mytable.myid ="
" myothertable.otherid JOIN thirdtable ON "
"mytable.myid = thirdtable.userid",
)
self.assert_compile(
join(
users, addresses, users.c.user_id == addresses.c.user_id
).select(),
"SELECT users.user_id, users.user_name, users.password, "
"addresses.address_id, addresses.user_id AS user_id_1, "
"addresses.street, "
"addresses.city, addresses.state, addresses.zip "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id",
)
self.assert_compile(
select(table1, table2, table3).select_from(
join(
table1, table2, table1.c.myid == table2.c.otherid
).outerjoin(table3, table1.c.myid == table3.c.userid)
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable "
"JOIN myothertable ON mytable.myid "
"= myothertable.otherid LEFT OUTER JOIN thirdtable "
"ON mytable.myid ="
" thirdtable.userid",
)
self.assert_compile(
select(table1, table2, table3).select_from(
outerjoin(
table1,
join(table2, table3, table2.c.otherid == table3.c.userid),
table1.c.myid == table2.c.otherid,
)
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable LEFT OUTER JOIN "
"(myothertable "
"JOIN thirdtable ON myothertable.otherid = "
"thirdtable.userid) ON "
"mytable.myid = myothertable.otherid",
)
query = (
select(table1, table2)
.where(
or_(
table1.c.name == "fred",
table1.c.myid == 10,
table2.c.othername != "jack",
text("EXISTS (select yay from foo where boo = lar)"),
)
)
.select_from(
outerjoin(table1, table2, table1.c.myid == table2.c.otherid)
)
)
self.assert_compile(
query,
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername "
"FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = "
"myothertable.otherid WHERE mytable.name = :name_1 OR "
"mytable.myid = :myid_1 OR myothertable.othername != :othername_1 "
"OR EXISTS (select yay from foo where boo = lar)",
)
def test_full_outer_join(self):
for spec in [
join(table1, table2, table1.c.myid == table2.c.otherid, full=True),
outerjoin(
table1, table2, table1.c.myid == table2.c.otherid, full=True
),
table1.join(table2, table1.c.myid == table2.c.otherid, full=True),
table1.outerjoin(
table2, table1.c.myid == table2.c.otherid, full=True
),
]:
stmt = select(table1).select_from(spec)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable FULL OUTER JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_compound_selects(self):
assert_raises_message(
exc.CompileError,
"All selectables passed to CompoundSelect "
"must have identical numbers of columns; "
"select #1 has 2 columns, select #2 has 3",
union(table3.select(), table1.select()).compile,
)
x = union(
select(table1).where(table1.c.myid == 5),
select(table1).where(table1.c.myid == 12),
).order_by(table1.c.myid)
self.assert_compile(
x,
"SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE "
"mytable.myid = :myid_1 UNION "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_2 "
"ORDER BY myid",
)
x = union(select(table1), select(table1))
x = union(x, select(table1))
self.assert_compile(
x,
"(SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable) UNION SELECT mytable.myid,"
" mytable.name, mytable.description FROM mytable",
)
u1 = union(
select(table1.c.myid, table1.c.name),
select(table2),
select(table3),
).order_by("name")
self.assert_compile(
u1,
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable "
"UNION SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable ORDER BY name",
)
u1s = u1.subquery()
assert u1s.corresponding_column(table2.c.otherid) is u1s.c.myid
self.assert_compile(
union(select(table1.c.myid, table1.c.name), select(table2))
.order_by("myid")
.offset(10)
.limit(5),
# note table name is omitted here. The CompoundSelect, inside of
# _label_resolve_dict(), creates a subquery of itself and then
# turns "named_with_column" off, so that we can order by the
# "myid" name as relative to the CompoundSelect itself without it
# having a name.
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername "
"FROM myothertable ORDER BY myid "
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 5, "param_2": 10},
)
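# informal restatement of the comment above: because the CompoundSelect
# temporarily treats itself as an unnamed subquery for label resolution,
# the string "myid" in order_by() binds to the first SELECT's column
# without any table qualification, giving "ORDER BY myid" rather than
# "ORDER BY mytable.myid".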
# these tests are mostly in test_text, however adding one here
# to check the special thing CompoundSelect does with labels
assert_raises_message(
exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY / "
"DISTINCT etc. Textual "
"SQL expression 'noname'",
union(
select(table1.c.myid, table1.c.name),
select(table2),
)
.order_by("noname")
.compile,
)
self.assert_compile(
union(
select(
table1.c.myid,
table1.c.name,
func.max(table1.c.description),
)
.where(table1.c.name == "name2")
.group_by(table1.c.myid, table1.c.name),
table1.select().where(table1.c.name == "name1"),
),
"SELECT mytable.myid, mytable.name, "
"max(mytable.description) AS max_1 "
"FROM mytable WHERE mytable.name = :name_1 "
"GROUP BY mytable.myid, "
"mytable.name UNION SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE mytable.name = :name_2",
)
self.assert_compile(
union(
select(literal(100).label("value")),
select(literal(200).label("value")),
),
"SELECT :param_1 AS value UNION SELECT :param_2 AS value",
)
self.assert_compile(
union_all(
select(table1.c.myid),
union(select(table2.c.otherid), select(table3.c.userid)),
),
"SELECT mytable.myid FROM mytable UNION ALL "
"(SELECT myothertable.otherid FROM myothertable UNION "
"SELECT thirdtable.userid FROM thirdtable)",
)
s = select(column("foo"), column("bar"))
self.assert_compile(
union(s.order_by("foo"), s.order_by("bar")),
"(SELECT foo, bar ORDER BY foo) UNION "
"(SELECT foo, bar ORDER BY bar)",
)
self.assert_compile(
union(
s.order_by("foo").self_group(),
s.order_by("bar").limit(10).self_group(),
),
"(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, "
"bar ORDER BY bar LIMIT :param_1)",
{"param_1": 10},
)
def test_dupe_cols_hey_we_can_union(self):
"""test the original inspiration for [ticket:4753]."""
s1 = select(table1, table1.c.myid).where(table1.c.myid == 5)
s2 = select(table1, table2.c.otherid).where(
table1.c.myid == table2.c.otherid
)
# note myid__1 is a dedupe of same column, same table. see
# test/sql/test_labels.py for the double underscore thing
self.assert_compile(
union(s1, s2).order_by(s1.selected_columns.myid),
"SELECT mytable.myid, mytable.name, mytable.description, "
"mytable.myid AS myid__1 FROM mytable "
"WHERE mytable.myid = :myid_1 "
"UNION SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid ORDER BY myid",
)
def test_deduping_hash_algo(self):
"""related to #7153.
testing new feature "add_hash" of _anon_label which adds an additional
integer value as part of what the anon label is deduplicated upon.
"""
class Thing(ColumnElement):
def __init__(self, hash_):
self._hash = hash_
def __hash__(self):
return self._hash
t1 = Thing(10)
t2 = Thing(11)
# this is the collision case. therefore we assert that this
# add_hash has to be below 16 bits.
# eq_(
# t1._anon_label('hi', add_hash=65537),
# t2._anon_label('hi', add_hash=1)
# )
with expect_raises(AssertionError):
t1._anon_label("hi", add_hash=65536)
for i in range(50):
ne_(
t1._anon_label("hi", add_hash=i),
t2._anon_label("hi", add_hash=1),
)
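# the arithmetic behind the (commented-out) collision case above:
# assuming the dedupe key combines roughly as
# hash(element) * 2**16 + add_hash, then hash 10 with add_hash 65537 and
# hash 11 with add_hash 1 both produce 720897 - hence the assertion that
# add_hash must stay below 2**16 (65536).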
def test_deduping_unique_across_selects(self):
"""related to #7153
checks that deduplicated anon labels use a unique hash not only
within each statement but also across multiple statements.
"""
s1 = select(null(), null())
s2 = select(true(), true())
s3 = union(s1, s2)
self.assert_compile(
s3,
"SELECT NULL AS anon_1, NULL AS anon__1 UNION "
# without the feature tested in test_deduping_hash_algo we'd get
# "SELECT true AS anon_2, true AS anon__1",
"SELECT true AS anon_2, true AS anon__2",
dialect="default_enhanced",
)
def test_compound_grouping(self):
s = select(column("foo"), column("bar")).select_from(text("bat"))
self.assert_compile(
union(union(union(s, s), s), s),
"((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
union(s, s, s, s),
"SELECT foo, bar FROM bat UNION SELECT foo, bar "
"FROM bat UNION SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
union(s, union(s, union(s, s))),
"SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat))",
)
self.assert_compile(
select(s.alias()),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat) AS anon_1",
)
self.assert_compile(
select(union(s, s).alias()),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat UNION "
"SELECT foo, bar FROM bat) AS anon_1",
)
self.assert_compile(
select(except_(s, s).alias()),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1",
)
# this query sqlite specifically chokes on
self.assert_compile(
union(except_(s, s), s),
"(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
union(s, except_(s, s)),
"SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)",
)
# this solves it
self.assert_compile(
union(except_(s, s).alias().select(), s),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1 "
"UNION SELECT foo, bar FROM bat",
)
self.assert_compile(
except_(union(s, s), union(s, s)),
"(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)",
)
s2 = union(s, s)
s3 = union(s2, s2)
self.assert_compile(
s3,
"(SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat)",
)
self.assert_compile(
union(intersect(s, s), intersect(s, s)),
"(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat INTERSECT "
"SELECT foo, bar FROM bat)",
)
# tests for [ticket:2528]
# sqlite hates all of these.
self.assert_compile(
union(s.limit(1), s.offset(2)),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_2)",
)
self.assert_compile(
union(s.order_by(column("bar")), s.offset(2)),
"(SELECT foo, bar FROM bat ORDER BY bar) "
"UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_1)",
)
self.assert_compile(
union(
s.limit(1).alias("a").element, s.limit(2).alias("b").element
),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2)",
)
self.assert_compile(
union(s.limit(1).self_group(), s.limit(2).self_group()),
"(SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2)",
)
self.assert_compile(
union(s.limit(1), s.limit(2).offset(3)).alias().select(),
"SELECT anon_1.foo, anon_1.bar FROM "
"((SELECT foo, bar FROM bat LIMIT :param_1) "
"UNION (SELECT foo, bar FROM bat LIMIT :param_2 OFFSET :param_3)) "
"AS anon_1",
)
# this version works for SQLite
self.assert_compile(
union(s.limit(1).alias().select(), s.offset(2).alias().select()),
"SELECT anon_1.foo, anon_1.bar "
"FROM (SELECT foo, bar FROM bat"
" LIMIT :param_1) AS anon_1 "
"UNION SELECT anon_2.foo, anon_2.bar "
"FROM (SELECT foo, bar "
"FROM bat"
" LIMIT -1 OFFSET :param_2) AS anon_2",
)
def test_cast(self):
tbl = table(
"casttest",
column("id", Integer),
column("v1", Float),
column("v2", Float),
column("ts", TIMESTAMP),
)
def check_results(dialect, expected_results, literal):
eq_(
len(expected_results),
5,
"Incorrect number of expected results",
)
eq_(
str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
"CAST(casttest.v1 AS %s)" % expected_results[0],
)
eq_(
str(tbl.c.v1.cast(Numeric).compile(dialect=dialect)),
"CAST(casttest.v1 AS %s)" % expected_results[0],
)
eq_(
str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
"CAST(casttest.v1 AS %s)" % expected_results[1],
)
eq_(
str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
"CAST(casttest.ts AS %s)" % expected_results[2],
)
eq_(
str(cast(1234, Text).compile(dialect=dialect)),
"CAST(%s AS %s)" % (literal, expected_results[3]),
)
eq_(
str(cast("test", String(20)).compile(dialect=dialect)),
"CAST(%s AS %s)" % (literal, expected_results[4]),
)
# fixme: shoving all of this dialect-specific stuff in one test
# is now officially completely ridiculous AND non-obviously omits
# coverage on other dialects.
sel = select(tbl, cast(tbl.c.v1, Numeric)).compile(dialect=dialect)
# TODO: another unusual result from disambiguate only:
# v1__1 vs v1_1 are due to the special meaning
# WrapsColumnExpression gives to the "_anon_name_label" attribute,
# where it tries to default to a label name that matches that of
# the column within.
if isinstance(dialect, type(mysql.dialect())):
eq_(
str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, "
"CAST(casttest.v1 AS DECIMAL) AS v1__1 \n"
"FROM casttest",
)
else:
eq_(
str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
"v1__1 \nFROM casttest",
)
sel = (
select(tbl, cast(tbl.c.v1, Numeric))
.set_label_style(LABEL_STYLE_NONE)
.compile(dialect=dialect)
)
# label style none - dupes v1
if isinstance(dialect, type(mysql.dialect())):
eq_(
str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, "
"CAST(casttest.v1 AS DECIMAL) AS v1 \n"
"FROM casttest",
)
else:
eq_(
str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
"v1 \nFROM casttest",
)
# first test with PostgreSQL engine
check_results(
postgresql.dialect(),
["NUMERIC", "NUMERIC(12, 9)", "DATE", "TEXT", "VARCHAR(20)"],
"%(param_1)s",
)
# then the Oracle engine
check_results(
oracle.dialect(),
["NUMERIC", "NUMERIC(12, 9)", "DATE", "CLOB", "VARCHAR2(20 CHAR)"],
":param_1",
)
# then the sqlite engine
check_results(
sqlite.dialect(),
["NUMERIC", "NUMERIC(12, 9)", "DATE", "TEXT", "VARCHAR(20)"],
"?",
)
# then the MySQL engine
check_results(
mysql.dialect(),
["DECIMAL", "DECIMAL(12, 9)", "DATE", "CHAR", "CHAR(20)"],
"%s",
)
self.assert_compile(
cast(text("NULL"), Integer),
"CAST(NULL AS INTEGER)",
dialect=sqlite.dialect(),
)
self.assert_compile(
cast(null(), Integer),
"CAST(NULL AS INTEGER)",
dialect=sqlite.dialect(),
)
self.assert_compile(
cast(literal_column("NULL"), Integer),
"CAST(NULL AS INTEGER)",
dialect=sqlite.dialect(),
)
@testing.combinations(
(
"default",
None,
"SELECT CAST(t1.txt AS VARCHAR(10)) AS txt FROM t1",
None,
),
(
"explicit_mssql",
"Latin1_General_CI_AS",
"SELECT CAST(t1.txt AS VARCHAR(10)) COLLATE Latin1_General_CI_AS AS txt FROM t1", # noqa
mssql.dialect(),
),
(
"explicit_mysql",
"utf8mb4_unicode_ci",
"SELECT CAST(t1.txt AS CHAR(10)) AS txt FROM t1",
mysql.dialect(),
),
(
"explicit_postgresql",
"en_US",
'SELECT CAST(t1.txt AS VARCHAR(10)) COLLATE "en_US" AS txt FROM t1', # noqa
postgresql.dialect(),
),
(
"explicit_sqlite",
"NOCASE",
'SELECT CAST(t1.txt AS VARCHAR(10)) COLLATE "NOCASE" AS txt FROM t1', # noqa
sqlite.dialect(),
),
id_="iaaa",
)
def test_cast_with_collate(self, collation_name, expected_sql, dialect):
t1 = Table(
"t1",
MetaData(),
Column("txt", String(10, collation=collation_name)),
)
stmt = select(func.cast(t1.c.txt, t1.c.txt.type))
self.assert_compile(stmt, expected_sql, dialect=dialect)
def test_over(self):
self.assert_compile(func.row_number().over(), "row_number() OVER ()")
self.assert_compile(
func.row_number().over(
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name], order_by=[table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name, order_by=table1.c.description
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=[table1.c.name, table1.c.description],
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.name, mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[], order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)",
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description], order_by=[]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)",
)
self.assert_compile(
func.row_number().over(partition_by=[], order_by=[]),
"row_number() OVER ()",
)
self.assert_compile(
select(
func.row_number()
.over(order_by=table1.c.description)
.label("foo")
),
"SELECT row_number() OVER (ORDER BY mytable.description) "
"AS foo FROM mytable",
)
# test from_obj generation.
# from func:
self.assert_compile(
select(func.max(table1.c.name).over(partition_by=["description"])),
"SELECT max(mytable.name) OVER (PARTITION BY mytable.description) "
"AS anon_1 FROM mytable",
)
# from partition_by
self.assert_compile(
select(func.row_number().over(partition_by=[table1.c.name])),
"SELECT row_number() OVER (PARTITION BY mytable.name) "
"AS anon_1 FROM mytable",
)
# from order_by
self.assert_compile(
select(func.row_number().over(order_by=table1.c.name)),
"SELECT row_number() OVER (ORDER BY mytable.name) "
"AS anon_1 FROM mytable",
)
# this tests that _from_objects
# concatenates OK
self.assert_compile(
select(column("x") + over(func.foo())),
"SELECT x + foo() OVER () AS anon_1",
)
# test a reference to a label that is in the referenced selectable;
# this resolves
expr = (table1.c.myid + 5).label("sum")
stmt = select(expr).alias()
self.assert_compile(
select(stmt.c.sum, func.row_number().over(order_by=stmt.c.sum)),
"SELECT anon_1.sum, row_number() OVER (ORDER BY anon_1.sum) "
"AS anon_2 FROM (SELECT mytable.myid + :myid_1 AS sum "
"FROM mytable) AS anon_1",
)
# test a reference to a label that's at the same level as the OVER
# in the columns clause; doesn't resolve
expr = (table1.c.myid + 5).label("sum")
self.assert_compile(
select(expr, func.row_number().over(order_by=expr)),
"SELECT mytable.myid + :myid_1 AS sum, "
"row_number() OVER "
"(ORDER BY mytable.myid + :myid_1) AS anon_1 FROM mytable",
)
def test_over_framespec(self):
expr = table1.c.myid
self.assert_compile(
select(func.row_number().over(order_by=expr, rows=(0, None))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid ROWS BETWEEN CURRENT "
"ROW AND UNBOUNDED FOLLOWING)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select(func.row_number().over(order_by=expr, rows=(None, None))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid ROWS BETWEEN UNBOUNDED "
"PRECEDING AND UNBOUNDED FOLLOWING)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select(func.row_number().over(order_by=expr, rows=(-10, 1))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid ROWS BETWEEN "
":param_1 PRECEDING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 10, "param_2": 1},
)
RF = FrameClauseType.FOLLOWING
RP = FrameClauseType.PRECEDING
self.assert_compile(
select(
func.row_number().over(
order_by=expr,
rows=FrameClause(3, 2, RF, RP),
)
),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid ROWS BETWEEN "
":param_1 FOLLOWING AND :param_2 PRECEDING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 3, "param_2": 2},
)
self.assert_compile(
select(func.row_number().over(order_by=expr, range_=(None, 0))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
"UNBOUNDED PRECEDING AND CURRENT ROW)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select(func.row_number().over(order_by=expr, range_=(-5, 10))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 PRECEDING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 5, "param_2": 10},
)
self.assert_compile(
select(func.row_number().over(order_by=expr, range_=(1, 10))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 FOLLOWING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 1, "param_2": 10},
)
self.assert_compile(
select(func.row_number().over(order_by=expr, range_=(-10, -1))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 PRECEDING AND :param_2 PRECEDING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 10, "param_2": 1},
)
self.assert_compile(
select(
func.row_number().over(
order_by=expr, range_=FrameClause("a", "x", RP, RF)
)
),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid RANGE BETWEEN "
":param_1 PRECEDING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": "a", "param_2": "x"},
)
self.assert_compile(
select(func.row_number().over(order_by=expr, groups=(None, 0))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid GROUPS BETWEEN "
"UNBOUNDED PRECEDING AND CURRENT ROW)"
" AS anon_1 FROM mytable",
)
self.assert_compile(
select(func.row_number().over(order_by=expr, groups=(-5, 10))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid GROUPS BETWEEN "
":param_1 PRECEDING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 5, "param_2": 10},
)
self.assert_compile(
select(func.row_number().over(order_by=expr, groups=(1, 10))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid GROUPS BETWEEN "
":param_1 FOLLOWING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 1, "param_2": 10},
)
self.assert_compile(
select(func.row_number().over(order_by=expr, groups=(-10, -1))),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid GROUPS BETWEEN "
":param_1 PRECEDING AND :param_2 PRECEDING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 10, "param_2": 1},
)
self.assert_compile(
select(
func.row_number().over(
order_by=expr,
groups=FrameClause(1, 3, RP, RF),
)
),
"SELECT row_number() OVER "
"(ORDER BY mytable.myid GROUPS BETWEEN "
":param_1 PRECEDING AND :param_2 FOLLOWING)"
" AS anon_1 FROM mytable",
checkparams={"param_1": 1, "param_2": 3},
)
def test_over_invalid_framespecs(self):
with expect_raises_message(
exc.ArgumentError,
"Integer or None expected for values in rows/groups frame",
):
func.row_number().over(rows=("foo", 8))
with expect_raises_message(
exc.ArgumentError,
"Integer or None expected for values in rows/groups frame",
):
func.row_number().over(groups=(-5, "foo"))
with expect_raises_message(
exc.ArgumentError,
"When using a tuple to specify a range only integer or none "
"values are allowed in the range frame. To specify a "
"different type use the FrameClause directly.",
):
func.row_number().over(range_=(-5, "foo"))
with expect_raises_message(
exc.ArgumentError,
"2-tuple expected for range/rows/groups",
):
func.row_number().over(rows=("foo",))
with expect_raises_message(
exc.ArgumentError,
"2-tuple expected for range/rows/groups",
):
func.row_number().over(groups=(-5, "foo", 1))
with expect_raises_message(
exc.ArgumentError, "2-tuple expected for range/rows/groups"
):
func.row_number().over(range_=(-5,))
with expect_raises_message(
exc.ArgumentError,
"only one of 'rows', 'range_', or 'groups' may be provided",
):
func.row_number().over(range_=(-5, 8), rows=(-2, 5))
with expect_raises_message(
exc.ArgumentError,
"only one of 'rows', 'range_', or 'groups' may be provided",
):
func.row_number().over(range_=(-5, 8), groups=(None, None))
with expect_raises_message(
exc.ArgumentError,
"only one of 'rows', 'range_', or 'groups' may be provided",
):
func.row_number().over(rows=(-2, 5), groups=(None, None))
with expect_raises_message(
exc.ArgumentError,
"only one of 'rows', 'range_', or 'groups' may be provided",
):
func.row_number().over(
range_=(-5, 8), rows=(-2, 5), groups=(None, None)
)
with expect_raises_message(
exc.ArgumentError,
"Cannot specify a value for start with frame type " "CURRENT",
):
FrameClause(
5,
None,
FrameClauseType.CURRENT,
FrameClauseType.UNBOUNDED,
)
with expect_raises_message(
exc.ArgumentError,
"Cannot specify a value for end with frame type " "UNBOUNDED",
):
FrameClause(
None,
5,
FrameClauseType.CURRENT,
FrameClauseType.UNBOUNDED,
)
def test_over_within_group(self):
from sqlalchemy import within_group
stmt = select(
table1.c.myid,
within_group(func.percentile_cont(0.5), table1.c.name.desc()).over(
range_=(1, 2),
partition_by=table1.c.name,
order_by=table1.c.myid,
),
)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
"WITHIN GROUP (ORDER BY mytable.name DESC) "
"OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
"RANGE BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
"AS anon_1 FROM mytable",
)
stmt = select(
table1.c.myid,
within_group(func.percentile_cont(0.5), table1.c.name.desc()).over(
rows=(1, 2),
partition_by=table1.c.name,
order_by=table1.c.myid,
),
)
eq_ignore_whitespace(
str(stmt),
"SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
"WITHIN GROUP (ORDER BY mytable.name DESC) "
"OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
"ROWS BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
"AS anon_1 FROM mytable",
)
def test_date_between(self):
import datetime
table = Table("dt", metadata, Column("date", Date))
self.assert_compile(
table.select().where(
table.c.date.between(
datetime.date(2006, 6, 1), datetime.date(2006, 6, 5)
)
),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={
"date_1": datetime.date(2006, 6, 1),
"date_2": datetime.date(2006, 6, 5),
},
)
self.assert_compile(
table.select().where(
sql.between(
table.c.date,
datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5),
)
),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={
"date_1": datetime.date(2006, 6, 1),
"date_2": datetime.date(2006, 6, 5),
},
)
def test_delayed_col_naming(self):
my_str = Column(String)
sel1 = select(my_str)
assert_raises_message(
exc.InvalidRequestError,
"Cannot initialize a sub-selectable with this Column",
lambda: sel1.subquery().c,
)
# calling label or scalar_subquery doesn't compile
# anything.
sel2 = select(func.substr(my_str, 2, 3)).label("my_substr")
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
sel2.compile,
dialect=default.DefaultDialect(),
)
sel3 = select(my_str).scalar_subquery()
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
sel3.compile,
dialect=default.DefaultDialect(),
)
my_str.name = "foo"
self.assert_compile(sel1, "SELECT foo")
self.assert_compile(
sel2, "(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)"
)
self.assert_compile(sel3, "(SELECT foo)")
def test_naming(self):
        # TODO: the parts where we check c.keys() are not "compile" tests; they
        # probably belong in test_selectable, or some broken-up
        # version of that suite
f1 = func.hoho(table1.c.name)
s1 = select(
table1.c.myid,
table1.c.myid.label("foobar"),
f1,
func.lala(table1.c.name).label("gg"),
)
eq_(list(s1.subquery().c.keys()), ["myid", "foobar", "hoho", "gg"])
meta = MetaData()
t1 = Table("mytable", meta, Column("col1", Integer))
exprs = (
table1.c.myid == 12,
func.hoho(table1.c.myid),
cast(table1.c.name, Numeric),
literal("x"),
)
for col, key, expr, lbl in (
(table1.c.name, "name", "mytable.name", None),
(
exprs[0],
"_no_label",
"mytable.myid = :myid_1",
"anon_1",
),
(exprs[1], "hoho", "hoho(mytable.myid)", "hoho_1"),
(
exprs[2],
"name",
"CAST(mytable.name AS NUMERIC)",
"name", # due to [ticket:4449]
),
(t1.c.col1, "col1", "mytable.col1", None),
(
column("some wacky thing"),
"some wacky thing",
'"some wacky thing"',
"",
),
(
exprs[3],
"_no_label",
":param_1",
"anon_1",
),
):
if getattr(col, "table", None) is not None:
t = col.table
else:
t = table1
s1 = select(col).select_from(t)
eq_(col._proxy_key, key if key != "_no_label" else None)
eq_(list(s1.subquery().c.keys()), [key])
if lbl:
self.assert_compile(
s1, "SELECT %s AS %s FROM mytable" % (expr, lbl)
)
else:
self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,))
s1 = select(s1.subquery())
if lbl:
alias_ = "anon_2" if lbl == "anon_1" else "anon_1"
self.assert_compile(
s1,
"SELECT %s.%s FROM (SELECT %s AS %s FROM mytable) AS %s"
% (alias_, lbl, expr, lbl, alias_),
)
elif col.table is not None:
# sqlite rule labels subquery columns
self.assert_compile(
s1,
"SELECT anon_1.%s FROM (SELECT %s AS %s FROM mytable) "
"AS anon_1" % (key, expr, key),
)
else:
self.assert_compile(
s1,
"SELECT anon_1.%s FROM (SELECT %s FROM mytable) AS anon_1"
% (expr, expr),
)
def test_hints(self):
s = select(table1.c.myid).with_hint(table1, "test hint %(name)s")
s2 = (
select(table1.c.myid)
.with_hint(table1, "index(%(name)s idx)", "oracle")
.with_hint(table1, "WITH HINT INDEX idx", "mssql")
)
a1 = table1.alias()
s3 = select(a1.c.myid).with_hint(a1, "index(%(name)s hint)")
subs4 = (
select(table1, table2)
.select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)
)
.with_hint(table1, "hint1")
).subquery()
s4 = (
select(table3)
.select_from(
table3.join(subs4, subs4.c.othername == table3.c.otherstuff)
)
.with_hint(table3, "hint3")
)
t1 = table("QuotedName", column("col1"))
s6 = (
select(t1.c.col1)
.where(t1.c.col1 > 10)
.with_hint(t1, "%(name)s idx1")
)
a2 = t1.alias("SomeName")
s7 = (
select(a2.c.col1)
.where(a2.c.col1 > 10)
.with_hint(a2, "%(name)s idx1")
)
mysql_d, oracle_d, mssql_d = (
mysql.dialect(),
oracle.dialect(),
mssql.dialect(),
)
for stmt, dialect, expected in [
(s, mysql_d, "SELECT mytable.myid FROM mytable test hint mytable"),
(
s,
oracle_d,
"SELECT /*+ test hint mytable */ mytable.myid FROM mytable",
),
(
s,
mssql_d,
"SELECT mytable.myid FROM mytable test hint mytable",
),
(s2, mysql_d, "SELECT mytable.myid FROM mytable"),
(
s2,
oracle_d,
"SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable",
),
(
s2,
mssql_d,
"SELECT mytable.myid FROM mytable WITH HINT INDEX idx",
),
(
s3,
mysql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)",
),
(
s3,
oracle_d,
"SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM "
"mytable mytable_1",
),
(
s3,
mssql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)",
),
(
s4,
mysql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 INNER JOIN (SELECT mytable.myid AS myid, "
"mytable.name AS name, "
"mytable.description AS description, "
"myothertable.otherid AS otherid, "
"myothertable.othername AS othername FROM mytable hint1 INNER "
"JOIN myothertable ON "
"mytable.myid = myothertable.otherid) AS anon_1 "
"ON anon_1.othername = thirdtable.otherstuff",
),
(
s4,
mssql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 JOIN (SELECT mytable.myid AS myid, "
"mytable.name AS name, "
"mytable.description AS description, "
"myothertable.otherid AS otherid, "
"myothertable.othername AS othername FROM mytable hint1 "
"JOIN myothertable ON "
"mytable.myid = myothertable.otherid) AS anon_1 "
"ON anon_1.othername = thirdtable.otherstuff",
),
(
s4,
oracle_d,
"SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable JOIN (SELECT /*+ hint1 */ "
"mytable.myid AS myid,"
" mytable.name AS name, mytable.description AS description, "
"myothertable.otherid AS otherid,"
" myothertable.othername AS othername "
"FROM mytable JOIN myothertable ON "
"mytable.myid = myothertable.otherid) anon_1 ON "
"anon_1.othername = thirdtable.otherstuff",
),
(
s6,
oracle_d,
"""SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """
"""FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1""",
),
(
s7,
oracle_d,
"""SELECT /*+ "SomeName" idx1 */ "SomeName".col1 FROM """
""""QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1""",
),
]:
self.assert_compile(stmt, expected, dialect=dialect)
def test_statement_hints(self):
stmt = (
select(table1.c.myid)
.with_statement_hint("test hint one")
.with_statement_hint("test hint two", "mysql")
)
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable test hint one"
)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable test hint one test hint two",
dialect="mysql",
)
def test_literal_as_text_fromstring(self):
self.assert_compile(and_(text("a"), text("b")), "a AND b")
def test_literal_as_text_nonstring_raise(self):
assert_raises(exc.ArgumentError, and_, ("a",), ("b",))
| SelectTest |
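The window-function tests above exercise SQLAlchemy's public over() API; a minimal sketch, assuming that API and an illustrative table, of attaching a rows-based frame:
from sqlalchemy import Column, Integer, MetaData, Table, func, select
metadata = MetaData()
orders = Table("orders", metadata, Column("id", Integer), Column("amount", Integer))
# A (-2, 0) tuple maps to "2 PRECEDING" / "CURRENT ROW", as in the framespec tests above.
stmt = select(
    func.sum(orders.c.amount).over(order_by=orders.c.id, rows=(-2, 0)).label("rolling_sum")
)
print(stmt)  # compiles to a ROWS BETWEEN ... PRECEDING AND CURRENT ROW window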
python | bokeh__bokeh | tests/unit/bokeh/embed/test_bundle.py | {
"start": 3104,
"end": 3522
} | class ____:
def test_no_objs_all_resources_bundled(self) -> None:
b = beb.bundle_for_objs_and_resources(None, ABSOLUTE)
assert any('bokeh-widgets' in f.url for f in b.js_files)
assert any('bokeh-gl' in f.url for f in b.js_files)
assert any('bokeh-tables' in f.url for f in b.js_files)
assert any('bokeh-mathjax' in f.url for f in b.js_files)
| Test_bundle_for_objs_and_resources |
python | getsentry__sentry | src/sentry/analytics/events/user_removed.py | {
"start": 69,
"end": 288
} | class ____(analytics.Event):
user_id: int
actor_id: int | None = None
deletion_request_datetime: str | None = None
deletion_datetime: str | None = None
analytics.register(UserRemovedEvent)
| UserRemovedEvent |
python | neetcode-gh__leetcode | python/1523-count-odd-numbers-in-an-interval-range.py | {
"start": 0,
"end": 168
} | class ____:
def countOdds(self, low: int, high: int) -> int:
        if low % 2 != 0 or high % 2 != 0:
            # an odd endpoint contributes one extra odd value to the count
            return (high - low) // 2 + 1
        return (high - low) // 2
| Solution |
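A quick arithmetic check of the formula above (illustrative values): the extra +1 fires whenever at least one endpoint is odd.
# countOdds(3, 7) -> (7 - 3) // 2 + 1 == 3, i.e. {3, 5, 7}
# countOdds(2, 8) -> (8 - 2) // 2     == 3, i.e. {3, 5, 7}
assert sum(n % 2 for n in range(3, 8)) == (7 - 3) // 2 + 1
assert sum(n % 2 for n in range(2, 9)) == (8 - 2) // 2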
python | viewflow__viewflow | viewflow/workflow/flow/views/filters.py | {
"start": 2809,
"end": 3019
} | class ____(FilterSet):
created = DateRangeFilter()
finished = NullDateRangeFilter()
class Meta:
model = Process
fields = ["status", "created", "finished"]
| DashboardProcessListViewFilter |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/responses.py | {
"start": 1646,
"end": 2786
} | class ____(BaseModel):
"""
Solr response body.
See `Solr documentation
<https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
for details.
"""
docs: list[dict[str, Any]]
"""Documents returned by Solr for the query.
Each document is a dictionary containing all of the fields specified in the
``fl`` parameter of the request (or a default if not provided).
"""
num_found: int
"""The number of documents returned by Solr."""
num_found_exact: bool
"""Whether the ``num_found`` value was approximated or not.
If ``True``, the real number of hits is guaranteed to be greater than or
equal to :py:attr:`.num_found`.
"""
start: int
"""The offset into the query's result set (for paginated queries)."""
model_config: ClassVar[ConfigDict] = ConfigDict(
alias_generator=alias_generators.to_camel, # generate camelCase aliases
extra="allow", # allow extra fields, for forward-compatability
populate_by_name=True, # allow both name and alias forms when building
)
| SolrSelectResponseBody |
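Because the model config above generates camelCase aliases and sets populate_by_name, a raw Solr payload and keyword construction are interchangeable; a small sketch with made-up values:
raw = {"numFound": 1, "numFoundExact": True, "start": 0, "docs": [{"id": "doc-42"}]}
from_solr = SolrSelectResponseBody.model_validate(raw)  # via camelCase aliases
by_name = SolrSelectResponseBody(num_found=1, num_found_exact=True, start=0, docs=[{"id": "doc-42"}])
assert from_solr.num_found == by_name.num_found == 1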
python | python-visualization__folium | folium/plugins/fullscreen.py | {
"start": 161,
"end": 1930
} | class ____(JSCSSMixin, MacroElement):
"""
Adds a fullscreen button to your map.
Parameters
----------
position : str
change the position of the button can be:
'topleft', 'topright', 'bottomright' or 'bottomleft'
default: 'topleft'
title : str
change the title of the button,
default: 'Full Screen'
title_cancel : str
change the title of the button when fullscreen is on,
default: 'Exit Full Screen'
force_separate_button : bool, default False
force separate button to detach from zoom buttons,
See https://github.com/brunob/leaflet.fullscreen for more information.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
L.control.fullscreen(
{{ this.options|tojavascript }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
"""
) # noqa
default_js = [
(
"Control.Fullscreen.js",
"https://cdn.jsdelivr.net/npm/leaflet.fullscreen@3.0.0/Control.FullScreen.min.js",
)
]
default_css = [
(
"Control.FullScreen.css",
"https://cdn.jsdelivr.net/npm/leaflet.fullscreen@3.0.0/Control.FullScreen.css",
)
]
def __init__(
self,
position="topleft",
title="Full Screen",
title_cancel="Exit Full Screen",
force_separate_button=False,
**kwargs
):
super().__init__()
self._name = "Fullscreen"
self.options = remove_empty(
position=position,
title=title,
title_cancel=title_cancel,
force_separate_button=force_separate_button,
**kwargs
)
| Fullscreen |
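A minimal usage sketch for the plugin above, assuming the standard folium Map API (location and file name are illustrative):
import folium
from folium.plugins import Fullscreen
m = folium.Map(location=[48.85, 2.35], zoom_start=12)
Fullscreen(
    position="topright",
    title="Expand me",
    title_cancel="Exit me",
    force_separate_button=True,
).add_to(m)
m.save("fullscreen_map.html")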
python | PyCQA__pylint | tests/functional/u/unsupported/unsupported_version_for_final.py | {
"start": 374,
"end": 611
} | class ____:
@final # [using-final-decorator-in-unsupported-version]
@final # [using-final-decorator-in-unsupported-version]
def my_method(self):
pass
@myfinal # [using-final-decorator-in-unsupported-version]
| MyClass1 |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/security_manager/test_override.py | {
"start": 1220,
"end": 9092
} | class ____:
def test_load_user(self):
sm = EmptySecurityManager()
sm.get_user_by_id = Mock()
sm.load_user("123")
sm.get_user_by_id.assert_called_once_with(123)
@mock.patch("airflow.providers.fab.auth_manager.security_manager.override.g", spec={})
def test_load_user_jwt(self, mock_g):
sm = EmptySecurityManager()
mock_user = Mock()
sm.load_user = Mock(return_value=mock_user)
actual_user = sm.load_user_jwt(None, {"sub": "test_identity"})
sm.load_user.assert_called_once_with("test_identity")
assert actual_user is mock_user
assert mock_g.user is mock_user
@mock.patch("airflow.providers.fab.auth_manager.security_manager.override.check_password_hash")
def test_check_password(self, check_password):
sm = EmptySecurityManager()
mock_user = Mock()
sm.find_user = Mock(return_value=mock_user)
check_password.return_value = True
assert sm.check_password("test_user", "test_password")
@mock.patch("airflow.providers.fab.auth_manager.security_manager.override.check_password_hash")
def test_check_password_user_not_found(self, check_password):
sm = EmptySecurityManager()
sm.find_user = Mock(return_value=None)
check_password.return_value = False
assert not sm.check_password("test_user", "test_password")
@mock.patch("airflow.providers.fab.auth_manager.security_manager.override.check_password_hash")
def test_check_password_not_match(self, check_password):
sm = EmptySecurityManager()
mock_user = Mock()
sm.find_user = Mock(return_value=mock_user)
check_password.return_value = False
assert not sm.check_password("test_user", "test_password")
@pytest.mark.parametrize(
("provider", "resp", "user_info"),
[
("github", {"login": "test"}, {"username": "github_test"}),
("githublocal", {"login": "test"}, {"username": "github_test"}),
("twitter", {"screen_name": "test"}, {"username": "twitter_test"}),
(
"linkedin",
{"id": "test", "firstName": "John", "lastName": "Doe", "email-address": "test@example.com"},
{
"username": "linkedin_test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
},
),
(
"google",
{"id": "test", "given_name": "John", "family_name": "Doe", "email": "test@example.com"},
{
"username": "google_test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
},
),
(
"azure",
{
"oid": "test",
"given_name": "John",
"family_name": "Doe",
"email": "test@example.com",
"roles": ["admin"],
},
{
"username": "test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
(
"azure",
{
"oid": "test",
"given_name": "John",
"family_name": "Doe",
"upn": "test@example.com",
"roles": ["admin"],
},
{
"username": "test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
("openshift", {"metadata": {"name": "test"}}, {"username": "openshift_test"}),
(
"okta",
{
"sub": "test",
"given_name": "John",
"family_name": "Doe",
"email": "test@example.com",
"groups": ["admin"],
},
{
"username": "okta_test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
("okta", {"error": "access_denied", "error_description": "Invalid bearer token."}, {}),
(
"auth0",
{
"sub": "test",
"given_name": "John",
"family_name": "Doe",
"email": "test@example.com",
"groups": ["admin"],
},
{
"username": "auth0_test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
(
"keycloak",
{
"preferred_username": "test",
"given_name": "John",
"family_name": "Doe",
"email": "test@example.com",
"groups": ["admin"],
},
{
"username": "test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
(
"keycloak_before_17",
{
"preferred_username": "test",
"given_name": "John",
"family_name": "Doe",
"email": "test@example.com",
"groups": ["admin"],
},
{
"username": "test",
"first_name": "John",
"last_name": "Doe",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
(
"authentik",
{
"nickname": "test",
"given_name": "John",
"preferred_username": "test@example.com",
"groups": ["admin"],
},
{
"username": "test",
"first_name": "John",
"email": "test@example.com",
"role_keys": ["admin"],
},
),
(
"other",
{"preferred_username": "test", "email": "test@example.com"},
{
"username": "test",
"first_name": "",
"last_name": "",
"email": "test@example.com",
"role_keys": [],
},
),
],
)
def test_get_oauth_user_info(self, provider, resp, user_info):
sm = EmptySecurityManager()
sm.appbuilder = Mock(sm=sm)
sm.oauth_remotes = {}
sm.oauth_remotes[provider] = Mock(
get=Mock(return_value=Mock(json=Mock(return_value=resp))),
userinfo=Mock(return_value=resp),
)
sm._decode_and_validate_azure_jwt = Mock(return_value=resp)
sm._get_authentik_token_info = Mock(return_value=resp)
assert sm.get_oauth_user_info(provider, {"id_token": None}) == user_info
| TestFabAirflowSecurityManagerOverride |
python | django__django | tests/prefetch_related/tests.py | {
"start": 68031,
"end": 68864
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
LessonEntry.objects.bulk_create(
LessonEntry(id=id_, name1=name1, name2=name2)
for id_, name1, name2 in [
(1, "einfach", "simple"),
(2, "schwierig", "difficult"),
]
)
WordEntry.objects.bulk_create(
WordEntry(id=id_, lesson_entry_id=lesson_entry_id, name=name)
for id_, lesson_entry_id, name in [
(1, 1, "einfach"),
(2, 1, "simple"),
(3, 2, "schwierig"),
(4, 2, "difficult"),
]
)
def test_bug(self):
list(
WordEntry.objects.prefetch_related(
"lesson_entry", "lesson_entry__wordentry_set"
)
)
| Ticket19607Tests |
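For context, a sketch of the access pattern the regression test above protects, assuming the LessonEntry/WordEntry models it uses: with both relations prefetched, the round trip through lesson_entry and its wordentry_set is served from the prefetch cache rather than extra queries.
words = WordEntry.objects.prefetch_related(
    "lesson_entry", "lesson_entry__wordentry_set"
)
for word in words:
    # Neither attribute access below should hit the database again.
    sibling_names = [w.name for w in word.lesson_entry.wordentry_set.all()]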
python | scipy__scipy | scipy/special/_generate_pyx.py | {
"start": 16388,
"end": 28766
} | class ____(Func):
"""
Ufunc signature, restricted format suitable for special functions.
Parameters
----------
name
Name of the ufunc to create
signature
String of form 'func: fff*ff->f, func2: ddd->*i' describing
the C-level functions and types of their input arguments
and return values.
The syntax is
'function_name: inputparams*outputparams->output_retval*ignored_retval'
Attributes
----------
name : str
Python name for the Ufunc
signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name)
List of parsed signatures
doc : str
Docstring, obtained from add_newdocs
function_name_overrides : dict of str->str
Overrides for the function names in signatures
"""
def __init__(self, name, signatures):
super().__init__(name, signatures)
self.doc = add_newdocs.get(name)
if self.doc is None:
raise ValueError(f"No docstring for ufunc {name!r}")
self.doc = textwrap.dedent(self.doc).strip()
def _get_signatures_and_loops(self, all_loops):
inarg_num = None
outarg_num = None
seen = set()
variants = []
def add_variant(func_name, inarg, outarg, ret, inp, outp):
if inp in seen:
return
seen.add(inp)
sig = (func_name, inp, outp)
if "v" in outp:
raise ValueError(f"{self.name}: void signature {sig!r}")
if len(inp) != inarg_num or len(outp) != outarg_num:
raise ValueError(f"{self.name}: signature {sig!r} does "
f"not have {inarg_num}/{outarg_num} input/output args")
loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp)
all_loops[loop_name] = loop
variants.append((func_name, loop_name, inp, outp))
# First add base variants
for func_name, inarg, outarg, ret, header in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
ret = ret.replace('*', '')
if inarg_num is None:
inarg_num = len(inarg)
outarg_num = len(outp)
inp, outp = list(iter_variants(inarg, outp))[0]
add_variant(func_name, inarg, outarg, ret, inp, outp)
# Then the supplementary ones
for func_name, inarg, outarg, ret, header in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
ret = ret.replace('*', '')
for inp, outp in iter_variants(inarg, outp):
add_variant(func_name, inarg, outarg, ret, inp, outp)
# Then sort variants to input argument cast order
# -- the sort is stable, so functions earlier in the signature list
# are still preferred
variants.sort(key=lambda v: cast_order(v[2]))
return variants, inarg_num, outarg_num
def generate(self, all_loops):
toplevel = ""
variants, inarg_num, outarg_num = self._get_signatures_and_loops(
all_loops)
loops = []
funcs = []
types = []
for func_name, loop_name, inputs, outputs in variants:
for x in inputs:
types.append(TYPE_NAMES[x])
for x in outputs:
types.append(TYPE_NAMES[x])
loops.append(loop_name)
funcs.append(func_name)
toplevel += (
f"cdef np.PyUFuncGenericFunction ufunc_{self.name}_loops[{len(loops)}]\n"
)
toplevel += f"cdef void *ufunc_{self.name}_ptr[{2 * len(funcs)}]\n"
toplevel += f"cdef void *ufunc_{self.name}_data[{len(funcs)}]\n"
toplevel += f"cdef char ufunc_{self.name}_types[{len(types)}]\n"
toplevel += 'cdef char *ufunc_{}_doc = (\n "{}")\n'.format(
self.name,
self.doc.replace("\\", "\\\\").replace('"', '\\"')
.replace('\n', '\\n\"\n "')
)
for j, function in enumerate(loops):
toplevel += (f"ufunc_{self.name}_loops[{j}] = "
f"<np.PyUFuncGenericFunction>{function}\n")
for j, type in enumerate(types):
toplevel += f"ufunc_{self.name}_types[{j}] = <char>{type}\n"
for j, func in enumerate(funcs):
toplevel += (f"ufunc_{self.name}_ptr[2*{j}] = <void*>"
f"{self.cython_func_name(func, specialized=True)}\n")
toplevel += (f"ufunc_{self.name}_ptr[2*{j}+1] = <void*>"
f"(<char*>\"{self.name}\")\n")
for j, func in enumerate(funcs):
toplevel += f"ufunc_{self.name}_data[{j}] = &ufunc_{self.name}_ptr[2*{j}]\n"
toplevel += (f"@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, ufunc_@_data, "
f"ufunc_@_types, {int(len(types)/(inarg_num + outarg_num))}, "
f"{inarg_num}, {outarg_num}, 0, '@', ufunc_@_doc, 0)"
f"\n").replace('@', self.name)
return toplevel
def get_declaration(ufunc, c_name, c_proto, cy_proto, header,
proto_h_filename):
"""
Construct a Cython declaration of a function coming either from a
pxd or a header file. Do sufficient tricks to enable compile-time
type checking against the signature expected by the ufunc.
"""
defs = []
defs_h = []
var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_')
if header.endswith('.pxd'):
defs.append(
f"from .{header[:-4]} cimport {ufunc.cython_func_name(c_name, prefix='')}"
f" as {ufunc.cython_func_name(c_name)}")
# check function signature at compile time
proto_name = f'_proto_{var_name}_t'
defs.append(f"ctypedef {cy_proto.replace('(*)', proto_name)}")
defs.append(f"cdef {proto_name} *{proto_name}_var = "
f"&{ufunc.cython_func_name(c_name, specialized=True)}")
else:
# redeclare the function, so that the assumed
# signature is checked at compile time
new_name = f"{ufunc.cython_func_name(c_name)} \"{c_name}\""
proto_h_filename = os.path.basename(proto_h_filename)
defs.append(f'cdef extern from r"{proto_h_filename}":')
defs.append(f" cdef {cy_proto.replace('(*)', new_name)}")
defs_h.append(f'#include "{header}"')
defs_h.append(f"{c_proto.replace('(*)', c_name)};")
return defs, defs_h, var_name
def generate_ufuncs(fn_prefix, cxx_fn_prefix, ufuncs):
filename = fn_prefix + ".pyx"
proto_h_filename = fn_prefix + '_defs.h'
cxx_proto_h_filename = cxx_fn_prefix + '_defs.h'
cxx_pyx_filename = cxx_fn_prefix + ".pyx"
cxx_pxd_filename = cxx_fn_prefix + ".pxd"
toplevel = ""
# for _ufuncs*
defs = []
defs_h = []
all_loops = {}
# for _ufuncs_cxx*
cxx_defs = []
cxx_pxd_defs = [
"from . cimport sf_error",
"cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) "
"noexcept nogil"
]
cxx_defs_h = []
ufuncs.sort(key=lambda u: u.name)
for ufunc in ufuncs:
# generate function declaration and type checking snippets
cfuncs = ufunc.get_prototypes()
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
header = header[:-2]
# for the CXX module
item_defs, item_defs_h, var_name = get_declaration(
ufunc, c_name, c_proto, cy_proto, header, cxx_proto_h_filename
)
cxx_defs.extend(item_defs)
cxx_defs_h.extend(item_defs_h)
func_name = ufunc.cython_func_name(
c_name, specialized=True, override=False
)
cxx_defs.append(f"cdef void *_export_{var_name} = <void*>{func_name}")
cxx_pxd_defs.append(f"cdef void *_export_{var_name}")
# let cython grab the function pointer from the c++ shared library
ufunc.function_name_overrides[c_name] = (
"scipy.special._ufuncs_cxx._export_" + var_name
)
else:
# usual case
item_defs, item_defs_h, _ = get_declaration(
ufunc, c_name, c_proto, cy_proto, header, proto_h_filename
)
defs.extend(item_defs)
defs_h.extend(item_defs_h)
# ufunc creation code snippet
t = ufunc.generate(all_loops)
toplevel += t + "\n"
# Produce output
toplevel = "\n".join(sorted(all_loops.values()) + defs + [toplevel])
# Generate an `__all__` for the module
all_ufuncs = (
[
f"'{ufunc.name}'"
for ufunc in ufuncs if not ufunc.name.startswith('_')
]
+ ["'geterr'", "'seterr'", "'errstate'", "'jn'"] +
[
f"'{name}'"
for name in special_ufuncs if not name.startswith('_')
]
)
module_all = f"__all__ = [{', '.join(all_ufuncs)}]"
with open(filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write(UFUNCS_EXTRA_CODE)
f.write(module_all)
f.write("\n")
f.write(toplevel)
f.write(UFUNCS_EXTRA_CODE_BOTTOM)
defs_h = unique(defs_h)
with open(proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(defs_h))
f.write("\n#endif\n")
cxx_defs_h = unique(cxx_defs_h)
with open(cxx_proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(cxx_defs_h))
f.write("\n#endif\n")
with open(cxx_pyx_filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write("\n")
f.write("\n".join(cxx_defs))
with open(cxx_pxd_filename, 'w') as f:
f.write("\n".join(cxx_pxd_defs))
def unique(lst):
"""
Return a list without repeated entries (first occurrence is kept),
preserving order.
"""
seen = set()
new_lst = []
for item in lst:
if item in seen:
continue
seen.add(item)
new_lst.append(item)
return new_lst
def newer(source, target):
"""
Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't. Return false if
both exist and 'target' is the same age or younger than 'source'.
"""
if not os.path.exists(source):
raise ValueError(f"file '{os.path.abspath(source)}' does not exist")
if not os.path.exists(target):
return 1
mtime1 = os.stat(source)[ST_MTIME]
mtime2 = os.stat(target)[ST_MTIME]
return mtime1 > mtime2
def all_newer(src_files, dst_files):
return all(os.path.exists(dst) and newer(dst, src)
for dst in dst_files for src in src_files)
def main(outdir):
pwd = os.path.dirname(__file__)
src_files = (os.path.abspath(__file__),
os.path.abspath(os.path.join(pwd, 'functions.json')),
os.path.abspath(os.path.join(pwd, '_add_newdocs.py')))
dst_files = ('_ufuncs.pyx',
'_ufuncs_defs.h',
'_ufuncs_cxx.pyx',
'_ufuncs_cxx.pxd',
'_ufuncs_cxx_defs.h')
dst_files = (os.path.join(outdir, f) for f in dst_files)
os.chdir(BASE_DIR)
if all_newer(src_files, dst_files):
print("scipy/special/_generate_pyx.py: all files up-to-date")
return
ufuncs = []
with open('functions.json') as data:
functions = json.load(data)
for f, sig in functions.items():
if (f not in special_ufuncs):
ufuncs.append(Ufunc(f, sig))
generate_ufuncs(os.path.join(outdir, "_ufuncs"),
os.path.join(outdir, "_ufuncs_cxx"),
ufuncs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
args = parser.parse_args()
if not args.outdir:
raise ValueError("Missing `--outdir` argument to _generate_pyx.py")
else:
outdir_abs = os.path.join(os.getcwd(), args.outdir)
main(outdir_abs)
| Ufunc |
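To make the restricted signature grammar described in the Ufunc docstring concrete, a toy parse of a single entry (the entry itself is illustrative, not taken from functions.json):
sig = "cephes_beta: dd->d"
func_name, spec = (part.strip() for part in sig.split(":"))
in_types, out_spec = spec.split("->")
print(func_name, list(in_types), list(out_spec.replace("*", "")))
# cephes_beta ['d', 'd'] ['d']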
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 69341,
"end": 134190
} | class ____(Sam3TrackerVideoPreTrainedModel):
input_modalities = ("video", "text")
_can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam3TrackerVideoTwoWayAttentionBlock, index=2)}
_keys_to_ignore_on_load_unexpected = [r"^detector_model."]
_tied_weights_keys = {}
_keys_to_ignore_on_load_missing = []
_checkpoint_conversion_mapping = {
r"tracker_model.(.+)": r"\1", # the regex allows to remove the prefix, and add it back in revert mode
"detector_model.vision_encoder.backbone.": "vision_encoder.backbone.",
"tracker_neck.": "vision_encoder.neck.",
}
def __init__(self, config: Sam3TrackerVideoConfig, remove_vision_encoder: bool = False):
r"""
remove_vision_encoder (`bool`, *optional*, defaults to `False`):
Whether to remove the vision encoder. If True, the vision encoder will be set to None.
"""
# loading from a sam3_video config
if hasattr(config, "tracker_config") and config.tracker_config is not None:
tracker_config = config.tracker_config
if isinstance(tracker_config, dict):
tracker_config = Sam3TrackerVideoConfig(**tracker_config)
config = tracker_config
super().__init__(config)
self.shared_image_embedding = Sam3TrackerVideoPositionalEmbedding(config.prompt_encoder_config)
self.vision_encoder = AutoModel.from_config(config.vision_config) if not remove_vision_encoder else None
self.prompt_encoder = Sam3TrackerVideoPromptEncoder(config.prompt_encoder_config)
# The module using it is not a PreTrainedModel subclass so we need this
config.mask_decoder_config._attn_implementation = config._attn_implementation
self.mask_decoder = Sam3TrackerVideoMaskDecoder(config.mask_decoder_config)
self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes
# a single token to indicate no memory embedding from previous frames
self.hidden_dim = config.vision_config.fpn_hidden_size
self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
self.config = config
# For video sequence inference
self.image_size = config.image_size
self.memory_attention = Sam3TrackerVideoMemoryAttention(config)
self.memory_encoder = Sam3TrackerVideoMemoryEncoder(config)
self.no_memory_positional_encoding = torch.nn.Parameter(
torch.zeros(1, 1, config.vision_config.fpn_hidden_size)
)
self.mem_dim = config.memory_encoder_output_channels
self.num_maskmem = config.num_maskmem # Number of memories accessible
# Temporal encoding of the memories
self.memory_temporal_positional_encoding = torch.nn.Parameter(
torch.zeros(self.num_maskmem, 1, 1, self.mem_dim)
)
self.no_object_pointer = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
# A conv layer to downsample the mask prompt to stride 4 (the same stride as
# low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
# so that it can be fed into the SAM mask decoder to generate a pointer.
self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
# a feedforward layer on SAM output tokens to turn them into object pointers
self.object_pointer_proj = Sam3TrackerVideoFeedForward(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)
if self.config.enable_temporal_pos_encoding_for_object_pointers:
# a linear projection on temporal positional encoding in object pointers to
# avoid potential interference with spatial positional encoding
self.temporal_positional_encoding_projection_layer = torch.nn.Linear(self.hidden_dim, self.mem_dim)
else:
self.temporal_positional_encoding_projection_layer = torch.nn.Identity()
self.occlusion_spatial_embedding_parameter = None # compatibility with Sam2
if config.enable_occlusion_spatial_embedding:
self.occlusion_spatial_embedding_parameter = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
self.post_init()
def get_input_embeddings(self):
return self.vision_encoder.get_input_embeddings()
def get_image_wide_positional_embeddings(self) -> torch.Tensor:
size = self.prompt_encoder.image_embedding_size
target_device = self.shared_image_embedding.positional_embedding.device
target_dtype = self.shared_image_embedding.positional_embedding.dtype
grid = torch.ones(size, device=target_device, dtype=target_dtype)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / size[0]
x_embed = x_embed / size[1]
positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))
return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width
@torch.no_grad()
def get_image_embeddings(
self,
pixel_values: torch.FloatTensor,
**kwargs: Unpack[TransformersKwargs],
) -> list[torch.Tensor]:
r"""
Returns the image embeddings by passing the pixel values through the vision encoder.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Input pixel values
"""
batch_size = pixel_values.shape[0]
feature_maps, _, _, _ = self.get_image_features(pixel_values, **kwargs)
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
return image_embeddings
@torch.no_grad()
def get_prompt_embeddings(
self,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
r"""
Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.
Args:
input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
Optional input points for the prompt encoder. The padding of the point is automatically done by the
processor. `point_batch_size` refers to the number of masks that we want the model to predict per
point. The model will output `point_batch_size` times 3 masks in total.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
processor, or can be fed by the user.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):
Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
processor. users can also pass manually the input boxes.
input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):
Optional input masks for the prompt encoder.
"""
prompt_output = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
return prompt_output
@torch.inference_mode()
@auto_docstring(custom_intro="Propagate the objects through a streamed video frame.")
def forward(
self,
inference_session: Sam3TrackerVideoInferenceSession,
frame_idx: Optional[int] = None,
frame: Optional[torch.Tensor] = None,
reverse: bool = False,
run_mem_encoder: bool = True,
) -> Sam3TrackerVideoSegmentationOutput:
r"""
inference_session (`Sam3TrackerVideoInferenceSession`):
The video inference session object.
frame_idx (`int`, *optional*):
The index of the frame on which to run inference. No need to provide when inferring
on a new streamed frame.
frame (`torch.Tensor`, *optional*):
The frame to process. Provide when streaming.
reverse (`bool`, *optional*, defaults to `False`):
Whether to propagate in reverse.
run_mem_encoder (`bool`, *optional*, defaults to `True`):
Whether to run the memory encoder on predicted masks. The memory encoder is batched across all objects for efficiency.
"""
if frame is not None:
frame_idx = inference_session.add_new_frame(frame, frame_idx)
if frame is not None and inference_session.get_obj_num() == 0:
raise ValueError("No objects are provided for tracking; please add inputs first.")
num_objects = inference_session.get_obj_num()
pred_masks_per_obj = [None] * num_objects
object_score_logits_per_obj = [None] * num_objects
# Collect data for batched memory encoding
objects_needing_memory_encoding = []
high_res_masks_for_memory = []
object_score_logits_for_memory = []
is_mask_from_pts_per_obj = []
# Note: We avoid batched inference here because per-object inputs (clicks/masks)
# can differ across objects.
for obj_idx in range(num_objects):
obj_id = inference_session.obj_idx_to_id(obj_idx)
has_new_inputs = obj_id in inference_session.obj_with_new_inputs
has_cond_output = frame_idx in inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]
# If this object has no new inputs and this frame already has a
# conditioning output, reuse the cached masks instead of recomputing.
if (not has_new_inputs) and has_cond_output:
pred_masks = inference_session.get_output(obj_idx, frame_idx, "pred_masks", is_conditioning_frame=True)
object_score_logits = inference_session.get_output(
obj_idx, frame_idx, "object_score_logits", is_conditioning_frame=True
)
is_init_cond_frame = True
else:
# Defaults when there are no new inputs
is_init_cond_frame = False
point_inputs = None
mask_inputs = None
if has_new_inputs:
is_init_cond_frame = frame_idx not in inference_session.frames_tracked_per_obj[obj_idx]
if is_init_cond_frame:
reverse = False
point_inputs = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None)
mask_inputs = inference_session.mask_inputs_per_obj[obj_idx].get(frame_idx, None)
if point_inputs is not None or mask_inputs is not None:
inference_session.obj_with_new_inputs.remove(obj_id)
current_out = self._run_single_frame_inference(
inference_session=inference_session,
obj_idx=obj_idx,
frame_idx=frame_idx,
batch_size=1, # run on the slice of a single object
is_init_cond_frame=is_init_cond_frame,
point_inputs=point_inputs,
mask_inputs=mask_inputs,
reverse=reverse,
streaming=frame is not None,
)
inference_session.store_output(
obj_idx, frame_idx, output_value=current_out, is_conditioning_frame=is_init_cond_frame
)
pred_masks = current_out["pred_masks"]
object_score_logits = current_out["object_score_logits"]
# Collect data for batched memory encoding
if run_mem_encoder and self.num_maskmem > 0:
objects_needing_memory_encoding.append(obj_idx)
high_res_masks_for_memory.append(current_out["high_res_masks"])
object_score_logits_for_memory.append(object_score_logits)
is_mask_from_pts_per_obj.append(point_inputs is not None or mask_inputs is not None)
pred_masks_per_obj[obj_idx] = pred_masks
object_score_logits_per_obj[obj_idx] = object_score_logits.squeeze(-1)
if not is_init_cond_frame:
# only for tracked frames, not for initial conditioning frames
inference_session.frames_tracked_per_obj[obj_idx][frame_idx] = {"reverse": reverse}
# Batch encode memories for all objects at once
self._batch_encode_memories(
inference_session=inference_session,
frame_idx=frame_idx,
objects_needing_memory_encoding=objects_needing_memory_encoding,
high_res_masks_for_memory=high_res_masks_for_memory,
object_score_logits_for_memory=object_score_logits_for_memory,
is_mask_from_pts_per_obj=is_mask_from_pts_per_obj,
)
# Resize the output mask to the original video resolution (we directly use
# the mask scores on GPU for output to avoid any CPU conversion in between)
if len(pred_masks_per_obj) > 1:
all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
all_object_score_logits = torch.cat(object_score_logits_per_obj, dim=0)
else:
all_pred_masks = pred_masks_per_obj[0]
all_object_score_logits = object_score_logits_per_obj[0]
return Sam3TrackerVideoSegmentationOutput(
object_ids=inference_session.obj_ids.copy(),
pred_masks=all_pred_masks,
object_score_logits=all_object_score_logits,
frame_idx=frame_idx,
)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[
list[torch.Tensor],
list[torch.Tensor],
Optional[tuple[torch.FloatTensor, ...]],
Optional[tuple[torch.FloatTensor, ...]],
]:
r"""
Extract and preprocess image features using the vision encoder.
Args:
pixel_values (`torch.FloatTensor`):
Input pixel values of shape `(batch_size, num_channels, height, width)`.
Returns:
`tuple`: A tuple containing:
- feature_maps (`list[torch.Tensor]`): List of feature maps from different levels.
- feature_maps_position_embeddings (`list[torch.Tensor]`): List of positional embeddings for each feature level.
- vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Hidden states from the vision encoder.
- vision_attentions (`tuple[torch.FloatTensor]`, *optional*): Attention weights from the vision encoder.
"""
vision_outputs: Sam3TrackerVideoVisionEncoderOutput = self.vision_encoder(
pixel_values,
**kwargs,
)
feature_maps = vision_outputs.fpn_hidden_states
feature_maps_position_embeddings = vision_outputs.fpn_position_encoding
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
feature_maps = list(feature_maps[:-1])
feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0])
feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1])
# flatten NxCxHxW to HWxNxC
feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
feature_maps_position_embeddings = [
feature_map_position_embedding.flatten(2).permute(2, 0, 1)
for feature_map_position_embedding in feature_maps_position_embeddings[:-1]
]
return feature_maps, feature_maps_position_embeddings, vision_outputs.hidden_states, vision_outputs.attentions
def _prepare_vision_features(
self,
inference_session: Sam3TrackerVideoInferenceSession,
frame_idx: int,
batch_size: int,
) -> tuple[torch.Tensor, list[torch.Tensor]]:
"""Prepare vision features for a frame."""
# Check if features are cached
if cached_features := inference_session.cache.get_vision_features(frame_idx):
vision_feats = cached_features["vision_feats"]
vision_pos_embeds = cached_features["vision_pos_embeds"]
else:
# Compute features using image encoder
image_batch = inference_session.get_frame(frame_idx).unsqueeze(0) # Add batch dimension
vision_feats, vision_pos_embeds, _, _ = self.get_image_features(image_batch)
# Cache features
inference_session.cache.cache_vision_features(
frame_idx, {"vision_feats": vision_feats, "vision_pos_embeds": vision_pos_embeds}
)
# Expand to batch size if needed
if batch_size > 1:
vision_feats = vision_feats.expand(batch_size, -1, -1, -1)
vision_pos_embeds = [pe.expand(batch_size, -1, -1, -1) for pe in vision_pos_embeds]
return vision_feats, vision_pos_embeds
def _single_frame_forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
multimask_output: bool = True,
attention_similarity: Optional[torch.FloatTensor] = None,
target_embedding: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Sam3TrackerVideoImageSegmentationOutput:
"""
input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much
better results. The points can be obtained by passing a list of list of list to the processor that will
create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
coordinates of the point. If a different number of points is passed either for each image, or for each
mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
computation of the embedding will be skipped for these points using the labels.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
official implementation, there are 3 types of labels
- `1`: the point is a point that contains the object of interest
- `0`: the point is a point that does not contain the object of interest
- `-1`: the point corresponds to the background
We added the label:
- `-10`: the point is a padding point, thus should be ignored by the prompt encoder
The padding labels should be automatically done by the processor.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to
much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
In the order (`x1`, `y1`, `x2`, `y2`):
- `x1`: the x coordinate of the top left point of the input box
- `y1`: the y coordinate of the top left point of the input box
- `x2`: the x coordinate of the bottom right point of the input box
- `y2`: the y coordinate of the bottom right point of the input box
input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be
manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
multimask_output (`bool`, *optional*):
In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
"best" mask, by specifying `multimask_output=False`.
attention_similarity (`torch.FloatTensor`, *optional*):
Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
target_embedding (`torch.FloatTensor`, *optional*):
Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
"""
if not ((pixel_values is None) ^ (image_embeddings is None)):
raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")
if input_points is not None and input_boxes is not None:
if input_points.shape[1] != input_boxes.shape[1]:
raise ValueError(
f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
)
elif input_points is not None:
num_objects = input_points.shape[1]
elif input_boxes is not None:
num_objects = input_boxes.shape[1]
elif input_masks is not None:
num_objects = input_masks.shape[1]
else:
num_objects = 1
image_positional_embeddings = self.get_image_wide_positional_embeddings()
# repeat with batch size
batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
vision_attentions = None
vision_hidden_states = None
if pixel_values is not None:
feature_maps, _, vision_hidden_states, vision_attentions = self.get_image_features(
pixel_values,
**kwargs,
)
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
if input_points is not None and input_labels is None:
input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
if input_points is None and input_boxes is None:
# If no points are provide, pad with an empty point (with label -1)
input_points = torch.zeros(
batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
)
input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)
if input_masks is not None:
# If mask_inputs is provided, downsize it into low-res mask input if needed
# and feed it as a dense mask prompt into the SAM mask encoder
if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
input_masks = F.interpolate(
input_masks.float(),
size=self.prompt_encoder.mask_input_size,
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(input_masks.dtype)
sparse_embeddings, dense_embeddings = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
low_res_multimasks, iou_scores, sam_output_tokens, object_score_logits = self.mask_decoder(
image_embeddings=image_embeddings[-1],
image_positional_embeddings=image_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
high_resolution_features=image_embeddings[:-1],
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
is_obj_appearing = object_score_logits > 0
# Mask used for spatial memories is always a *hard* choice between obj and no obj,
# consistent with the actual mask prediction
low_res_multimasks = torch.where(
is_obj_appearing[:, None, None],
low_res_multimasks,
NO_OBJ_SCORE,
)
# convert masks from possibly bfloat16 (or float16) to float32
# (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
high_res_multimasks = (
F.interpolate(
low_res_multimasks.squeeze(1).float(),
size=(self.image_size, self.image_size),
mode="bilinear",
align_corners=False,
)
.unsqueeze(1)
.to(low_res_multimasks.dtype)
)
sam_output_token = sam_output_tokens[:, :, 0]
if multimask_output:
# take the best mask prediction (with the highest IoU estimation)
best_iou_inds = torch.argmax(iou_scores, dim=-1)
batch_inds = torch.arange(batch_size, device=high_res_multimasks.device)
object_batch_inds = torch.arange(num_objects, device=high_res_multimasks.device)
low_res_masks = low_res_multimasks[batch_inds, object_batch_inds, best_iou_inds]
high_res_masks = high_res_multimasks[batch_inds, object_batch_inds, best_iou_inds]
if sam_output_tokens.size(2) > 1:
sam_output_token = sam_output_tokens[batch_inds, object_batch_inds, best_iou_inds]
else:
low_res_masks, high_res_masks = low_res_multimasks[:, :, 0], high_res_multimasks[:, :, 0]
# Extract object pointer from the SAM output token (with occlusion handling)
object_pointer = self.object_pointer_proj(sam_output_token)
lambda_is_obj_appearing = is_obj_appearing.to(object_pointer.dtype)
object_pointer = lambda_is_obj_appearing * object_pointer
object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer
return Sam3TrackerVideoImageSegmentationOutput(
iou_scores=iou_scores,
pred_masks=low_res_masks,
high_res_masks=high_res_masks,
object_pointer=object_pointer,
object_score_logits=object_score_logits,
image_embeddings=image_embeddings,
vision_hidden_states=vision_hidden_states,
vision_attentions=vision_attentions,
)
def _use_mask_as_output(
self,
backbone_features: torch.Tensor,
high_res_features: list[torch.Tensor],
mask_inputs: torch.Tensor,
) -> Sam3TrackerVideoImageSegmentationOutput:
"""
Directly turn binary `mask_inputs` into a output mask logits without using SAM.
(same input and output shapes as in forward above).
"""
# Use -10/+20 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
mask_inputs_float = mask_inputs.to(backbone_features[0].dtype)
# Ensure mask is at self.image_size resolution for consistency
if mask_inputs_float.shape[-2:] != (self.image_size, self.image_size):
mask_inputs_float = F.interpolate(
mask_inputs_float.float(),
size=(self.image_size, self.image_size),
align_corners=False,
mode="bilinear",
antialias=True,
).to(mask_inputs.dtype)
high_res_masks = mask_inputs_float * out_scale + out_bias
low_res_masks = F.interpolate(
high_res_masks.float(),
size=self.prompt_encoder.mask_input_size,
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(backbone_features[0].dtype)
# a dummy IoU prediction of all 1's under mask input
iou_scores = mask_inputs.new_ones(mask_inputs.size(0), 1).to(backbone_features[0].dtype)
# produce an object pointer using the SAM decoder from the mask input
object_pointer = self._single_frame_forward(
input_masks=self.mask_downsample(mask_inputs_float.to(backbone_features[0].dtype)),
image_embeddings=high_res_features + [backbone_features],
).object_pointer
# In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
# Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
# on the object_scores from the SAM decoder.
is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
is_obj_appearing = is_obj_appearing[..., None]
lambda_is_obj_appearing = is_obj_appearing.to(backbone_features[0].dtype)
object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
object_pointer = lambda_is_obj_appearing * object_pointer
object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer
return Sam3TrackerVideoImageSegmentationOutput(
iou_scores=iou_scores,
pred_masks=low_res_masks,
high_res_masks=high_res_masks,
object_pointer=object_pointer,
object_score_logits=object_score_logits.unsqueeze(-1),
image_embeddings=high_res_features + [backbone_features],
)
def _select_closest_cond_frames(self, frame_idx, cond_frame_outputs, max_cond_frame_num):
"""
Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
that are temporally closest to the current frame at `frame_idx`. Here, we take
- a) the closest conditioning frame before `frame_idx` (if any);
- b) the closest conditioning frame after `frame_idx` (if any);
- c) any other temporally closest conditioning frames until reaching a total
of `max_cond_frame_num` conditioning frames.
Outputs:
- selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
- unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
"""
if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
selected_outputs = cond_frame_outputs
unselected_outputs = {}
else:
selected_outputs = {}
# the closest conditioning frame before `frame_idx` (if any)
idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
if idx_before is not None:
selected_outputs[idx_before] = cond_frame_outputs[idx_before]
# the closest conditioning frame after `frame_idx` (if any)
idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
if idx_after is not None:
selected_outputs[idx_after] = cond_frame_outputs[idx_after]
# add other temporally closest conditioning frames until reaching a total
# of `max_cond_frame_num` conditioning frames.
num_remain = max_cond_frame_num - len(selected_outputs)
inds_remain = sorted(
(t for t in cond_frame_outputs if t not in selected_outputs),
key=lambda x: abs(x - frame_idx),
)[:num_remain]
selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
return selected_outputs, unselected_outputs
def _gather_memory_frame_outputs(
self,
inference_session: Sam3TrackerVideoInferenceSession,
obj_idx: int,
frame_idx: int,
track_in_reverse_time: bool = False,
) -> list[tuple[int, dict]]:
"""
Get memory frames from conditioning and non-conditioning outputs.
Returns:
List of (relative_temporal_offset, output_data) tuples.
"""
temporal_positions_and_previous_outputs = []
# Add conditioning frame outputs (limited by max_cond_frame_num)
conditioning_outputs = inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]
if not conditioning_outputs:
raise ValueError(
"maskmem_features in conditioning outputs cannot be empty when not is_initial_conditioning_frame"
)
conditioning_outputs, unselected_conditioning_outputs = self._select_closest_cond_frames(
frame_idx, conditioning_outputs, max_cond_frame_num=self.config.max_cond_frame_num
)
# Store (temporal_position, output_data) tuples
temporal_positions_and_previous_outputs = [(0, out) for out in conditioning_outputs.values()]
# Add non-conditioning memory frames (up to self.num_maskmem - 1)
# These are typically frames tracked by the model without direct user input.
# Frames are selected with a stride, prioritizing the most recent ones. Here we only support stride = 1 for simplicity.
for relative_temporal_offset in range(self.num_maskmem - 1, 0, -1):
# relative_temporal_offset: how many frames before (or after if reversing) the current frame
if not track_in_reverse_time:
previous_frame_idx = frame_idx - relative_temporal_offset
else:
previous_frame_idx = frame_idx + relative_temporal_offset
# check if the output is already stored without using get_output to avoid unnecessary memory transfers between CPU and GPU
output_data = inference_session.output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].get(
previous_frame_idx, unselected_conditioning_outputs.get(previous_frame_idx, None)
)
temporal_positions_and_previous_outputs.append((relative_temporal_offset, output_data))
return temporal_positions_and_previous_outputs
def _build_memory_attention_inputs(
self,
temporal_positions_and_previous_outputs: list[tuple[int, dict]],
device: torch.device,
) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
"""
Concatenate memory features and positional embeddings from previous frames.
Returns:
Tuple of (memories_to_concatenate, memory_positional_embeddings_to_concatenate).
"""
memories_to_concatenate = []
memory_positional_embeddings_to_concatenate = []
for relative_temporal_offset, prev_output_data in temporal_positions_and_previous_outputs:
if prev_output_data is None:
continue # Skip if no output data for this temporal position (e.g., padding frames)
# Load memory features (potentially from CPU to GPU)
# Features are flattened: (Batch, Channels, H, W) -> (H*W, Batch, Channels)
memory_features = prev_output_data["maskmem_features"].to(device, non_blocking=True)
memories_to_concatenate.append(memory_features)
# Spatial positional encoding (potentially from CPU to GPU)
spatial_memory_pos_embed = prev_output_data["maskmem_pos_enc"].to(device, non_blocking=True)
# Add temporal positional encoding
# self.memory_temporal_positional_encoding shape: (NumMaskMem, 1, 1, MemDim)
combined_memory_pos_embed = (
spatial_memory_pos_embed + self.memory_temporal_positional_encoding[relative_temporal_offset - 1]
)
memory_positional_embeddings_to_concatenate.append(combined_memory_pos_embed)
return memories_to_concatenate, memory_positional_embeddings_to_concatenate
def _get_object_pointers(
self,
inference_session: Sam3TrackerVideoInferenceSession,
obj_idx: int,
frame_idx: int,
num_total_frames: int,
device: torch.device,
track_in_reverse_time: bool = False,
streaming: bool = False,
) -> tuple[list[int], list[torch.Tensor], int]:
"""
Get object pointers and their positional embeddings from past frames.
Returns:
Tuple of (temporal_offsets, pointer_tokens, max_object_pointers_to_use).
"""
temporal_position_sign_multiplier = -1 if track_in_reverse_time else 1
# Determine max object pointers to use
if streaming:
max_object_pointers_to_use = self.config.max_object_pointers_in_encoder
else:
max_object_pointers_to_use = min(num_total_frames, self.config.max_object_pointers_in_encoder)
temporal_offsets: list[int] = []
pointer_tokens: list[torch.Tensor] = []
# Add object pointers from selected conditioning frames
# Optionally, only include pointers from past frames during evaluation
conditioning_outputs = inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]
eligible_conditioning_outputs = conditioning_outputs
if not self.training:
eligible_conditioning_outputs = {
temporal_idx: out
for temporal_idx, out in conditioning_outputs.items()
if (temporal_idx >= frame_idx if track_in_reverse_time else temporal_idx <= frame_idx)
}
for temporal_idx, out_data in eligible_conditioning_outputs.items():
temporal_difference = (frame_idx - temporal_idx) * temporal_position_sign_multiplier
temporal_offsets.append(temporal_difference)
pointer_tokens.append(out_data["object_pointer"].to(device))
# Add object pointers from non-conditioning frames (up to max_object_pointers_to_use - 1)
for t_diff_offset in range(1, max_object_pointers_to_use):
ref_frame_idx = frame_idx + t_diff_offset if track_in_reverse_time else frame_idx - t_diff_offset
if ref_frame_idx < 0 or (
not streaming and num_total_frames is not None and ref_frame_idx >= num_total_frames
):
break # Stop if frame index is out of bounds
# check if the output is already stored without using get_output to avoid unnecessary memory transfers between CPU and GPU
out_data = inference_session.output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].get(
ref_frame_idx, None
)
if out_data is not None:
temporal_offsets.append(t_diff_offset)
pointer_tokens.append(out_data["object_pointer"].to(device))
return temporal_offsets, pointer_tokens, max_object_pointers_to_use
def _process_object_pointers(
self,
temporal_offsets: list[int],
pointer_tokens: list[torch.Tensor],
max_object_pointers_to_use: int,
batch_size: int,
num_channels: int,
device: torch.device,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Process object pointers and compute their positional embeddings.
Returns:
Tuple of (object_pointers, object_pointers_pos_embed).
"""
if not pointer_tokens:
return None, None
# Stack object pointers: List of (Batch, Channels) -> (SeqLen_ptr, Batch, Channels)
object_pointers = torch.stack(pointer_tokens, dim=0)
if self.config.enable_temporal_pos_encoding_for_object_pointers:
max_temporal_diff = float(max_object_pointers_to_use - 1)
# Determine dimensionality for temporal positional encoding of pointers
pointer_tpos_dim = num_channels
# Normalize temporal differences before sine PE calculation
normalized_temporal_diffs = (
torch.tensor(temporal_offsets, device=device, dtype=torch.float32) / max_temporal_diff
)
sine_pe = get_1d_sine_pe(normalized_temporal_diffs, dim=pointer_tpos_dim).to(object_pointers.dtype)
projected_sine_pe = self.temporal_positional_encoding_projection_layer(sine_pe)
object_pointers_pos_embed = projected_sine_pe.unsqueeze(1).expand(-1, batch_size, self.mem_dim)
else:
object_pointers_pos_embed = object_pointers.new_zeros(
len(temporal_offsets), batch_size, self.mem_dim, dtype=object_pointers.dtype
)
if self.mem_dim < num_channels:
# If memory dimension is smaller, reshape/split pointers and repeat positional encoding
num_splits = num_channels // self.mem_dim
object_pointers = object_pointers.reshape(-1, batch_size, num_splits, self.mem_dim)
object_pointers = object_pointers.permute(0, 2, 1, 3).flatten(
0, 1
) # (SeqLen_ptr*num_splits, Batch, MemDim)
object_pointers_pos_embed = object_pointers_pos_embed.repeat_interleave(num_splits, dim=0)
return object_pointers, object_pointers_pos_embed
def _prepare_memory_conditioned_features(
self,
inference_session: Sam3TrackerVideoInferenceSession,
frame_idx: int,
obj_idx: int,
is_initial_conditioning_frame: bool,
current_vision_features: list[torch.Tensor],
current_vision_positional_embeddings: list[torch.Tensor],
num_total_frames: int,
track_in_reverse_time: bool = False,
streaming: bool = False,
) -> torch.Tensor:
"""
Fuse current frame's visual features with memory from previous frames for enhanced object tracking.
This method conditions the current frame's visual features on temporal memory from previous frames,
enabling consistent object tracking across video sequences. For initial conditioning frames, it uses
no-memory embeddings. For subsequent frames, it retrieves and integrates memory features from both
conditioning frames (user interactions) and non-conditioning frames (tracked results) via cross-attention.
Args:
inference_session (`Sam3TrackerVideoInferenceSession`):
The video inference session object.
frame_idx (`int`):
Index of the current frame being processed.
obj_idx (`int`):
Index of the object being processed.
is_initial_conditioning_frame (`bool`):
Whether this is an initial conditioning frame with user inputs (True) or a subsequent
tracking frame (False).
current_vision_features (`torch.Tensor`):
Highest-level vision features of shape `(seq_len, batch_size, channels)`.
current_vision_positional_embeddings (`torch.Tensor`):
Positional embedding tensors corresponding to the highest-level vision features.
num_total_frames (`int`):
Total number of frames in the video sequence.
track_in_reverse_time (`bool`, *optional*, defaults to `False`):
Whether tracking is performed in reverse temporal order.
streaming (`bool`, *optional*, defaults to `False`):
Whether this is streaming inference mode.
Returns:
`torch.Tensor`: Memory-conditioned feature tensor of shape `(batch_size, channels, height, width)`
suitable for input to the SAM decoder.
"""
# Get dimensions from the highest-level (lowest-resolution) feature map
batch_size = current_vision_features.size(1)
num_channels = self.hidden_dim
height, width = self.backbone_feature_sizes[-1]
device = current_vision_features.device
# If memory is disabled (e.g., for single image SAM), return current features directly.
if self.num_maskmem == 0:
# Permute (SeqLen, Batch, Channels) -> (Batch, Channels, SeqLen) then view as (Batch, Channels, Height, Width)
# Assuming SeqLen = Height * Width for the last feature map
current_feature_map = current_vision_features.permute(1, 2, 0).view(
batch_size, num_channels, height, width
)
return current_feature_map
# Step 1: Handle initial conditioning frames
if is_initial_conditioning_frame:
# For initial conditioning frames, no prior memory is used directly in this block.
# If configured, directly add a learnable "no memory" embedding.
# current_vision_features has shape (SeqLen, Batch, Channels)
conditioned_feature_map_flat = current_vision_features + self.no_memory_embedding
# Reshape to (Batch, Channels, Height, Width)
conditioned_feature_map = conditioned_feature_map_flat.permute(1, 2, 0).view(
batch_size, num_channels, height, width
)
return conditioned_feature_map
# Step 2: Get memory frames and concatenate their features
temporal_positions_and_previous_outputs = self._gather_memory_frame_outputs(
inference_session, obj_idx, frame_idx, track_in_reverse_time
)
memories_to_concatenate, memory_positional_embeddings_to_concatenate = self._build_memory_attention_inputs(
temporal_positions_and_previous_outputs, device
)
# Step 3: Get and process object pointers
temporal_offsets, pointer_tokens, max_object_pointers_to_use = self._get_object_pointers(
inference_session, obj_idx, frame_idx, num_total_frames, device, track_in_reverse_time, streaming
)
num_object_pointer_tokens = 0
if pointer_tokens:
object_pointers, object_pointers_pos_embed = self._process_object_pointers(
temporal_offsets, pointer_tokens, max_object_pointers_to_use, batch_size, num_channels, device
)
if object_pointers is not None:
memories_to_concatenate.append(object_pointers)
memory_positional_embeddings_to_concatenate.append(object_pointers_pos_embed)
num_object_pointer_tokens = object_pointers.shape[0]
# Step 4: Concatenate all retrieved memories and their positional embeddings
combined_memory = torch.cat(memories_to_concatenate, dim=0)
combined_memory_positional_embeddings = torch.cat(memory_positional_embeddings_to_concatenate, dim=0)
# Step 5: Forward through the memory attention mechanism
conditioned_feature_map_flat = self.memory_attention(
current_vision_features=current_vision_features,
current_vision_position_embeddings=current_vision_positional_embeddings,
memory=combined_memory,
memory_posision_embeddings=combined_memory_positional_embeddings, # Corrected typo from API
num_object_pointer_tokens=num_object_pointer_tokens,
)
# Reshape from (Batch, H*W, Channels) to (Batch, Channels, Height, Width)
conditioned_feature_map = (
conditioned_feature_map_flat.squeeze(1).permute(0, 2, 1).view(batch_size, num_channels, height, width)
)
return conditioned_feature_map
def _use_multimask(self, is_init_cond_frame: bool, point_inputs: Optional[dict]) -> bool:
"""Whether to use multimask output in the SAM head."""
num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(2)
multimask_output = (
self.config.multimask_output_in_sam
and (is_init_cond_frame or self.config.multimask_output_for_tracking)
and (self.config.multimask_min_pt_num <= num_pts <= self.config.multimask_max_pt_num)
)
return multimask_output
def _run_single_frame_inference(
self,
inference_session: Sam3TrackerVideoInferenceSession,
frame_idx: int,
obj_idx: int,
batch_size: int,
is_init_cond_frame: bool,
point_inputs: Optional[torch.Tensor],
mask_inputs: Optional[torch.Tensor],
reverse: bool,
prev_sam_mask_logits: Optional[torch.Tensor] = None,
streaming: bool = False,
) -> dict[str, Any]:
"""
Perform a single tracking step for video object segmentation.
Args:
inference_session (`Sam3TrackerVideoInferenceSession`):
The video inference session object.
frame_idx (`int`):
Index of the current frame.
obj_idx (`int`):
Index of the current object.
batch_size (`int`):
Batch size of the current frame.
is_init_cond_frame (`bool`):
Whether this is an initial conditioning frame with user inputs.
point_inputs (`dict`, *optional*):
Point prompt inputs for the current frame.
mask_inputs (`torch.Tensor`, *optional*):
Mask prompt inputs for the current frame.
reverse (`bool`, *optional*, defaults to `False`):
Whether to track in reverse time order.
prev_sam_mask_logits (`torch.Tensor`, *optional*):
Previously predicted SAM mask logits that can be fed with new clicks.
streaming (`bool`, *optional*, defaults to `False`):
Whether this is streaming inference.
Returns:
`dict`: Dictionary containing the tracking results for the current frame, including:
- pred_masks: Predicted low-resolution masks.
- object_pointer: Object pointer for memory.
- high_res_masks: High-resolution masks for batched memory encoding.
- object_score_logits: Object score logits (inference only).
"""
# Retrieve correct image features
current_vision_feats, current_vision_pos_embeds = self._prepare_vision_features(
inference_session, frame_idx, batch_size
)
# point and mask should not appear as input simultaneously on the same frame
if point_inputs is not None and mask_inputs is not None:
raise ValueError(
"point_inputs and mask_inputs should not appear as input simultaneously on the same frame"
)
# High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
if len(current_vision_feats) > 1:
high_res_features = [
x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
for x, s in zip(current_vision_feats[:-1], self.backbone_feature_sizes[:-1])
]
else:
high_res_features = None
if mask_inputs is not None:
# We directly output the mask input (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
pix_feat = current_vision_feats[-1].permute(1, 2, 0)
pix_feat = pix_feat.view(-1, self.hidden_dim, *self.backbone_feature_sizes[-1])
sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs)
else:
            # fuse the visual features with previous memory features in the memory bank
pix_feat = self._prepare_memory_conditioned_features(
inference_session=inference_session,
frame_idx=frame_idx,
obj_idx=obj_idx,
is_initial_conditioning_frame=is_init_cond_frame,
current_vision_features=current_vision_feats[-1],
current_vision_positional_embeddings=current_vision_pos_embeds[-1],
num_total_frames=inference_session.num_frames,
track_in_reverse_time=reverse,
streaming=streaming,
)
# apply SAM-style segmentation head
# here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
# e.g. in demo where such logits come from earlier interaction instead of correction sampling
# (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
if prev_sam_mask_logits is not None:
mask_inputs = prev_sam_mask_logits
multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
sam_outputs = self._single_frame_forward(
pixel_values=None, # Vision features already computed
input_points=point_inputs["point_coords"] if point_inputs is not None else None,
input_labels=point_inputs["point_labels"] if point_inputs is not None else None,
input_masks=mask_inputs,
image_embeddings=high_res_features + [pix_feat],
multimask_output=multimask_output,
)
# Memory encoding is now handled in batch by the caller (forward method)
current_out = {
"pred_masks": sam_outputs.pred_masks,
"object_pointer": sam_outputs.object_pointer,
"high_res_masks": sam_outputs.high_res_masks, # Needed for batched memory encoding
}
if not self.training:
current_out["object_score_logits"] = sam_outputs.object_score_logits
return current_out
def _encode_new_memory(
self,
current_vision_feats: torch.Tensor,
pred_masks_high_res: torch.Tensor,
object_score_logits: torch.Tensor,
is_mask_from_pts: bool,
) -> tuple[torch.Tensor, list[torch.Tensor]]:
"""Encode the current image and its prediction into a memory feature."""
batch_size = current_vision_feats.size(1) # batch size on this frame
channels = self.hidden_dim
height, width = self.backbone_feature_sizes[-1] # top-level (lowest-resolution) feature size
mask_input_size_h, mask_input_size_w = self.prompt_encoder.mask_input_size
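        # The mask memory input is 4x the prompt-encoder mask input size; predictions are resized to match below.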
mask_mem_size_h = mask_input_size_h * 4
mask_mem_size_w = mask_input_size_w * 4
if pred_masks_high_res.shape[2:] != (mask_mem_size_h, mask_mem_size_w):
# downsample the predicted high-res masks into the mask encoder input size
pred_masks_high_res = F.interpolate(
pred_masks_high_res.float(),
size=(mask_mem_size_h, mask_mem_size_w),
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(pred_masks_high_res.dtype)
# top-level feature, (HW)BC => BCHW
pix_feat = current_vision_feats.permute(1, 2, 0).view(batch_size, channels, height, width)
if is_mask_from_pts and not self.training:
# binarize the mask logits
mask_for_mem = (pred_masks_high_res > 0).to(pred_masks_high_res.dtype)
else:
# apply sigmoid on the raw mask logits to turn them into range (0, 1)
mask_for_mem = torch.sigmoid(pred_masks_high_res)
# apply scale and bias terms to the sigmoid probabilities
mask_for_mem = mask_for_mem * self.config.sigmoid_scale_for_mem_enc
mask_for_mem = mask_for_mem + self.config.sigmoid_bias_for_mem_enc
maskmem_features, maskmem_pos_enc = self.memory_encoder(
pix_feat,
mask_for_mem,
)
# add a no-object embedding to the spatial memory to indicate that the frame
# is predicted to be occluded (i.e. no object is appearing in the frame)
if self.occlusion_spatial_embedding_parameter is not None:
is_obj_appearing = (object_score_logits > 0).float()
maskmem_features += (1 - is_obj_appearing[..., None]) * self.occlusion_spatial_embedding_parameter[
..., None, None
].expand(*maskmem_features.shape)
# convert to bfloat16 to save memory, and for consistency with the original implementation
maskmem_features = maskmem_features.to(torch.bfloat16).flatten(2).permute(2, 0, 1)
maskmem_pos_enc = maskmem_pos_enc.to(pred_masks_high_res.dtype).flatten(2).permute(2, 0, 1)
return maskmem_features, maskmem_pos_enc
def _batch_encode_memories(
self,
inference_session: Sam3TrackerVideoInferenceSession,
frame_idx: int,
objects_needing_memory_encoding: list[int],
high_res_masks_for_memory: list[torch.Tensor],
object_score_logits_for_memory: list[torch.Tensor],
is_mask_from_pts_per_obj: list[bool],
):
"""
Batch encode memories for multiple objects at once.
Args:
inference_session: The video inference session object
frame_idx: Index of the current frame
objects_needing_memory_encoding: List of object indices that need memory encoding
high_res_masks_for_memory: List of high-resolution masks for each object
object_score_logits_for_memory: List of object score logits for each object
is_mask_from_pts_per_obj: List of booleans indicating if mask is from points for each object
"""
if not objects_needing_memory_encoding:
return
# Get vision features once for all objects
current_vision_feats, _ = self._prepare_vision_features(inference_session, frame_idx, batch_size=1)
# Stack all high-res masks and object scores
high_res_masks_batched = torch.cat(high_res_masks_for_memory, dim=0)
object_score_logits_batched = torch.cat(object_score_logits_for_memory, dim=0)
# Expand vision features to match batch size
expanded_vision_feats = current_vision_feats[-1].expand(-1, len(objects_needing_memory_encoding), -1)
# Encode all memories in one batch call
maskmem_features_batched, maskmem_pos_enc_batched = self._encode_new_memory(
current_vision_feats=expanded_vision_feats,
pred_masks_high_res=high_res_masks_batched,
object_score_logits=object_score_logits_batched,
is_mask_from_pts=any(is_mask_from_pts_per_obj),
)
# Split and store encoded memories per object
for i, obj_idx in enumerate(objects_needing_memory_encoding):
# Extract per-object memory from batched result
maskmem_features = maskmem_features_batched[:, i : i + 1]
maskmem_pos_enc = maskmem_pos_enc_batched[:, i : i + 1]
# Update the stored output with memory features
output_dict = inference_session.output_dict_per_obj[obj_idx]
# Determine if this was a conditioning frame
storage_key = (
"cond_frame_outputs" if frame_idx in output_dict["cond_frame_outputs"] else "non_cond_frame_outputs"
)
if frame_idx in output_dict[storage_key]:
output_dict[storage_key][frame_idx]["maskmem_features"] = maskmem_features
output_dict[storage_key][frame_idx]["maskmem_pos_enc"] = maskmem_pos_enc
@torch.inference_mode()
@auto_docstring(
custom_intro="""
Propagate the objects through the video frames. Used when initializing an inference session with a whole video.
Yields Sam3TrackerVideoSegmentationOutput for each frame.
"""
)
def propagate_in_video_iterator(
self,
inference_session: Sam3TrackerVideoInferenceSession,
start_frame_idx: Optional[int] = None,
max_frame_num_to_track: Optional[int] = None,
reverse: bool = False,
show_progress_bar: bool = False,
) -> Iterator[Sam3TrackerVideoSegmentationOutput]:
r"""
inference_session (`Sam3TrackerVideoInferenceSession`):
The video inference session object.
start_frame_idx (`int`, *optional*):
The starting frame index for propagation.
            Needs to be provided if `forward` hasn't been called on new inputs yet.
If not provided, the starting frame index will be the earliest frame with input points.
max_frame_num_to_track (`int`, *optional*):
The maximum number of frames to track.
reverse (`bool`, *optional*, defaults to `False`):
Whether to propagate in reverse.
show_progress_bar (`bool`, *optional*, defaults to `False`):
Whether to show a progress bar during propagation.
"""
num_frames = inference_session.num_frames
# set start index, end index, and processing order
if start_frame_idx is None:
# default: start from the earliest frame with input points
frames_with_inputs = [
frame_idx
for obj_output_dict in inference_session.output_dict_per_obj.values()
for frame_idx in obj_output_dict["cond_frame_outputs"]
]
if not frames_with_inputs:
raise ValueError(
"Cannot determine the starting frame index; please specify it manually, or run inference on a frame with inputs first."
)
start_frame_idx = min(frames_with_inputs)
if max_frame_num_to_track is None:
# default: track all the frames in the video
max_frame_num_to_track = num_frames
if reverse:
end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
if start_frame_idx > 0:
processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
else:
processing_order = [] # skip reverse tracking if starting from frame 0
else:
end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1)
processing_order = range(start_frame_idx, end_frame_idx + 1)
for frame_idx in tqdm(processing_order, desc="propagate in video", disable=not show_progress_bar):
sam3_tracker_video_output = self(inference_session, frame_idx=frame_idx, reverse=reverse)
yield sam3_tracker_video_output
__all__ = ["Sam3TrackerVideoModel", "Sam3TrackerVideoInferenceSession", "Sam3TrackerVideoPreTrainedModel"]
| Sam3TrackerVideoModel |
python | pypa__pip | tests/unit/test_options.py | {
"start": 14799,
"end": 19673
} | class ____(AddFakeCommandMixin):
# the reason to specifically test general options is due to the
# extra processing they receive, and the number of bugs we've had
def test_cache_dir__default(self) -> None:
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
# With no options the default cache dir should be used.
assert_is_default_cache_dir(options.cache_dir)
def test_cache_dir__provided(self) -> None:
# FakeCommand intentionally returns the wrong type.
options, args = cast(
tuple[Values, list[str]], main(["--cache-dir", "/cache/dir", "fake"])
)
assert options.cache_dir == "/cache/dir"
def test_no_cache_dir__provided(self) -> None:
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["--no-cache-dir", "fake"]))
assert options.cache_dir is False
def test_require_virtualenv(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--require-virtualenv", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--require-virtualenv"])
)
assert options1.require_venv
assert options2.require_venv
def test_log(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--log", "path", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--log", "path"])
)
assert options1.log == options2.log == "path"
def test_local_log(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--local-log", "path", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--local-log", "path"])
)
assert options1.log == options2.log == "path"
def test_no_input(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(tuple[Values, list[str]], main(["--no-input", "fake"]))
options2, args2 = cast(tuple[Values, list[str]], main(["fake", "--no-input"]))
assert options1.no_input
assert options2.no_input
def test_proxy(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--proxy", "path", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--proxy", "path"])
)
assert options1.proxy == options2.proxy == "path"
def test_retries(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--retries", "-1", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--retries", "-1"])
)
assert options1.retries == options2.retries == -1
def test_timeout(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--timeout", "-1", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--timeout", "-1"])
)
assert options1.timeout == options2.timeout == -1
def test_exists_action(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--exists-action", "w", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--exists-action", "w"])
)
assert options1.exists_action == options2.exists_action == ["w"]
def test_cert(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--cert", "path", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--cert", "path"])
)
assert options1.cert == options2.cert == "path"
def test_client_cert(self) -> None:
# FakeCommand intentionally returns the wrong type.
options1, args1 = cast(
tuple[Values, list[str]], main(["--client-cert", "path", "fake"])
)
options2, args2 = cast(
tuple[Values, list[str]], main(["fake", "--client-cert", "path"])
)
assert options1.client_cert == options2.client_cert == "path"
| TestGeneralOptions |
python | pennersr__django-allauth | allauth/socialaccount/providers/trainingpeaks/provider.py | {
"start": 418,
"end": 1478
} | class ____(OAuth2Provider):
id = "trainingpeaks"
name = "TrainingPeaks"
account_class = TrainingPeaksAccount
oauth2_adapter_class = TrainingPeaksOAuth2Adapter
def extract_uid(self, data):
return str(data["Id"])
def extract_common_fields(self, data):
extra_common = super(TrainingPeaksProvider, self).extract_common_fields(data)
firstname = data.get("FirstName")
lastname = data.get("LastName")
# fallback username as there is actually no Username in response
username = firstname.strip().lower() + "." + lastname.strip().lower()
name = " ".join(part for part in (firstname, lastname) if part)
extra_common.update(
username=data.get("username", username),
email=data.get("Email"),
first_name=firstname,
last_name=lastname,
name=name.strip(),
)
return extra_common
def get_default_scope(self):
return ["athlete:profile"]
provider_classes = [TrainingPeaksProvider]
| TrainingPeaksProvider |
python | doocs__leetcode | solution/1200-1299/1262.Greatest Sum Divisible by Three/Solution.py | {
"start": 0,
"end": 323
} | class ____:
def maxSumDivThree(self, nums: List[int]) -> int:
n = len(nums)
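        # f[i][j] is the max sum of a subset of the first i numbers whose sum % 3 == j; -inf marks unreachable states.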
f = [[-inf] * 3 for _ in range(n + 1)]
f[0][0] = 0
for i, x in enumerate(nums, 1):
for j in range(3):
f[i][j] = max(f[i - 1][j], f[i - 1][(j - x) % 3] + x)
return f[n][0]
| Solution |
python | getsentry__sentry | src/sentry/codecov/endpoints/repositories/repositories.py | {
"start": 962,
"end": 3640
} | class ____(CodecovEndpoint):
owner = ApiOwner.CODECOV
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Retrieves list of repositories for a given owner",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
PreventParams.OWNER,
PreventParams.LIMIT,
PreventParams.NAVIGATION,
PreventParams.CURSOR,
PreventParams.TERM,
],
request=None,
responses={
200: RepositoriesSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, owner: RpcIntegration, **kwargs) -> Response:
"""
Retrieves repository data for a given owner.
"""
navigation = request.query_params.get("navigation", NavigationParameter.NEXT.value)
limit_param = request.query_params.get("limit", MAX_RESULTS_PER_PAGE)
cursor = request.query_params.get("cursor")
owner_slug = owner.name
# When calling request.query_params, the URL is decoded so + is replaced with spaces. We need to change them back so Codecov can properly fetch the next page.
if cursor:
cursor = cursor.replace(" ", "+")
try:
limit = int(limit_param)
except ValueError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"details": "provided `limit` parameter must be a positive integer"},
)
if limit <= 0:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"details": "provided `limit` parameter must be a positive integer"},
)
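        # Cursor-based pagination: "first"/"after" page forward, "last"/"before" page backward.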
variables = {
"owner": owner_slug,
"filters": {"term": request.query_params.get("term")},
"direction": OrderingDirection.DESC.value,
"ordering": "COMMIT_DATE",
"first": limit if navigation != NavigationParameter.PREV.value else None,
"last": limit if navigation == NavigationParameter.PREV.value else None,
"before": cursor if cursor and navigation == NavigationParameter.PREV.value else None,
"after": cursor if cursor and navigation == NavigationParameter.NEXT.value else None,
}
client = CodecovApiClient(git_provider_org=owner_slug)
graphql_response = client.query(query=query, variables=variables)
repositories = RepositoriesSerializer().to_representation(graphql_response.json())
return Response(repositories)
| RepositoriesEndpoint |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/providers/document.py | {
"start": 730,
"end": 12570
} | class ____:
def register_file(self, filename, codeeditor):
filename = path_as_uri(filename)
if filename not in self.watched_files:
self.watched_files[filename] = []
self.watched_files[filename].append(codeeditor)
@handles(CompletionRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS)
def process_document_diagnostics(self, response, *args):
uri = response['uri']
diagnostics = response['diagnostics']
if uri in self.watched_files:
callbacks = self.watched_files[uri]
for callback in callbacks:
callback.handle_response(
CompletionRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS,
{'params': diagnostics})
else:
logger.debug("Received diagnostics for file not open: " + uri)
@send_notification(method=CompletionRequestTypes.DOCUMENT_DID_CHANGE)
def document_changed(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file']),
'version': params['version']
},
'contentChanges': [{
'text': params['text']
}]
}
return params
@send_notification(method=CompletionRequestTypes.DOCUMENT_DID_OPEN)
def document_open(self, editor_params):
uri = path_as_uri(editor_params['file'])
if uri not in self.watched_files:
self.register_file(
editor_params['file'], editor_params['codeeditor'])
params = {
'textDocument': {
'uri': uri,
'languageId': editor_params['language'],
'version': editor_params['version'],
'text': editor_params['text']
}
}
return params
@send_request(method=CompletionRequestTypes.DOCUMENT_COMPLETION)
def document_completion_request(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'position': {
'line': params['line'],
'character': params['column']
}
}
return params
@handles(CompletionRequestTypes.DOCUMENT_COMPLETION)
def process_document_completion(self, response, req_id):
if isinstance(response, dict):
response = response['items']
must_resolve = self.server_capabilites['completionProvider'].get(
'resolveProvider', False)
if response is not None:
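            # Fill in optional LSP fields with defaults so every completion item is fully populated.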
for item in response:
item['kind'] = item.get('kind', CompletionItemKind.TEXT)
item['detail'] = item.get('detail', '')
item['documentation'] = item.get('documentation', '')
item['sortText'] = item.get('sortText', item['label'])
item['filterText'] = item.get('filterText', item['label'])
item['insertTextFormat'] = item.get(
'insertTextFormat', InsertTextFormat.PLAIN_TEXT)
item['insertText'] = item.get('insertText', item['label'])
item['provider'] = LSP_COMPLETION
item['resolve'] = must_resolve
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_COMPLETION,
{'params': response}
)
@send_request(method=CompletionRequestTypes.COMPLETION_RESOLVE)
def completion_resolve_request(self, params):
return params['completion_item']
@handles(CompletionRequestTypes.COMPLETION_RESOLVE)
def handle_completion_resolve(self, response, req_id):
response['kind'] = response.get('kind', CompletionItemKind.TEXT)
response['detail'] = response.get('detail', '')
response['documentation'] = response.get('documentation', '')
response['sortText'] = response.get('sortText', response['label'])
response['filterText'] = response.get('filterText', response['label'])
response['insertTextFormat'] = response.get(
'insertTextFormat', InsertTextFormat.PLAIN_TEXT)
response['insertText'] = response.get('insertText', response['label'])
response['provider'] = LSP_COMPLETION
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.COMPLETION_RESOLVE,
{'params': response}
)
@send_request(method=CompletionRequestTypes.DOCUMENT_SIGNATURE)
def signature_help_request(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'position': {
'line': params['line'],
'character': params['column']
}
}
return params
@handles(CompletionRequestTypes.DOCUMENT_SIGNATURE)
def process_signature_completion(self, response, req_id):
if response and len(response['signatures']) > 0:
response['signatures'] = response['signatures'][
response['activeSignature']]
response['provider'] = LSP_COMPLETION
else:
response = None
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_SIGNATURE,
{'params': response})
@send_request(method=CompletionRequestTypes.DOCUMENT_HOVER)
def hover_request(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'position': {
'line': params['line'],
'character': params['column']
}
}
return params
@handles(CompletionRequestTypes.DOCUMENT_HOVER)
def process_hover_result(self, result, req_id):
contents = result['contents']
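        # Hover contents may be a plain string, a dict with a 'value' key, or a list mixing both.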
if isinstance(contents, dict):
if 'value' in contents:
contents = contents['value']
elif isinstance(contents, list):
text = []
for entry in contents:
if isinstance(entry, dict):
text.append(entry['value'])
else:
text.append(entry)
contents = '\n\n'.join(text)
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_HOVER,
{'params': contents})
@send_request(method=CompletionRequestTypes.DOCUMENT_SYMBOL)
def document_symbol_request(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
}
return params
@handles(CompletionRequestTypes.DOCUMENT_SYMBOL)
def process_document_symbol_request(self, result, req_id):
if req_id in self.req_reply:
self.req_reply[req_id](CompletionRequestTypes.DOCUMENT_SYMBOL,
{'params': result})
@send_request(method=CompletionRequestTypes.DOCUMENT_DEFINITION)
def go_to_definition_request(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'position': {
'line': params['line'],
'character': params['column']
}
}
return params
@handles(CompletionRequestTypes.DOCUMENT_DEFINITION)
def process_go_to_definition(self, result, req_id):
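        # The server may return a single location or a list of locations; keep only the first one.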
if isinstance(result, list):
if len(result) > 0:
result = result[0]
result['file'] = process_uri(result['uri'])
else:
result = None
elif isinstance(result, dict):
result['file'] = process_uri(result['uri'])
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_DEFINITION,
{'params': result})
@send_request(method=CompletionRequestTypes.DOCUMENT_FOLDING_RANGE)
def folding_range_request(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
}
}
return params
@handles(CompletionRequestTypes.DOCUMENT_FOLDING_RANGE)
def process_folding_range(self, result, req_id):
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_FOLDING_RANGE,
{'params': result})
@send_notification(method=CompletionRequestTypes.DOCUMENT_WILL_SAVE)
def document_will_save_notification(self, params):
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'reason': params['reason']
}
return params
@send_notification(method=CompletionRequestTypes.DOCUMENT_DID_SAVE)
def document_did_save_notification(self, params):
"""
Handle the textDocument/didSave message received from an LSP server.
"""
text = None
if 'text' in params:
text = params['text']
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
}
}
if text is not None:
params['text'] = text
return params
@send_notification(method=CompletionRequestTypes.DOCUMENT_DID_CLOSE)
def document_did_close(self, params):
codeeditor = params['codeeditor']
filename = path_as_uri(params['file'])
params = {
'textDocument': {
'uri': filename
}
}
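        # Only notify the server when the last editor viewing this file is closed; otherwise cancel the notification.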
if filename not in self.watched_files:
params[ClientConstants.CANCEL] = True
else:
editors = self.watched_files[filename]
if len(editors) > 1:
params[ClientConstants.CANCEL] = True
idx = -1
for i, editor in enumerate(editors):
if id(codeeditor) == id(editor):
idx = i
break
if idx >= 0:
editors.pop(idx)
if len(editors) == 0:
self.watched_files.pop(filename)
return params
@send_request(method=CompletionRequestTypes.DOCUMENT_FORMATTING)
def document_formatting_request(self, params):
options = params['options']
options = {
snake_to_camel(opt): options[opt]
for opt in options
}
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'options': options
}
return params
@handles(CompletionRequestTypes.DOCUMENT_FORMATTING)
def process_document_formatting(self, result, req_id):
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_FORMATTING,
{'params': result})
@send_request(method=CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING)
def document_range_formatting_request(self, params):
options = params['options']
options = {
snake_to_camel(opt): options[opt]
for opt in options
}
params = {
'textDocument': {
'uri': path_as_uri(params['file'])
},
'options': options,
'range': params['range']
}
return params
@handles(CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING)
def process_document_range_formatting(self, result, req_id):
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING,
{'params': result})
| DocumentProvider |
python | PrefectHQ__prefect | src/prefect/utilities/visualization.py | {
"start": 573,
"end": 2663
} | class ____(Exception):
pass
def get_task_viz_tracker() -> Optional["TaskVizTracker"]:
return TaskVizTrackerState.current
@overload
def track_viz_task(
is_async: Literal[True],
task_name: str,
parameters: dict[str, Any],
viz_return_value: Optional[Any] = None,
) -> Coroutine[Any, Any, Any]: ...
@overload
def track_viz_task(
is_async: Literal[False],
task_name: str,
parameters: dict[str, Any],
viz_return_value: Optional[Any] = None,
) -> Any: ...
def track_viz_task(
is_async: bool,
task_name: str,
parameters: dict[str, Any],
viz_return_value: Optional[Any] = None,
) -> Union[Coroutine[Any, Any, Any], Any]:
"""Return a result if sync otherwise return a coroutine that returns the result"""
if is_async:
return from_async.wait_for_call_in_loop_thread(
partial(_track_viz_task, task_name, parameters, viz_return_value)
)
else:
return _track_viz_task(task_name, parameters, viz_return_value)
def _track_viz_task(
task_name: str,
parameters: dict[str, Any],
viz_return_value: Optional[Any] = None,
) -> Any:
task_run_tracker = get_task_viz_tracker()
if task_run_tracker:
upstream_tasks: list[VizTask] = []
for _, v in parameters.items():
if isinstance(v, VizTask):
upstream_tasks.append(v)
# if it's an object that we've already seen,
# we can use the object id to find if there is a trackable task
# if so, add it to the upstream tasks
elif id(v) in task_run_tracker.object_id_to_task:
upstream_tasks.append(task_run_tracker.object_id_to_task[id(v)])
viz_task = VizTask(
name=task_name,
upstream_tasks=upstream_tasks,
)
task_run_tracker.add_task(viz_task)
if viz_return_value:
task_run_tracker.link_viz_return_value_to_viz_task(
viz_return_value, viz_task
)
return viz_return_value
return viz_task
| GraphvizExecutableNotFoundError |
python | getsentry__sentry | tests/sentry_plugins/slack/test_plugin.py | {
"start": 502,
"end": 6382
} | class ____(PluginTestCase):
@cached_property
def plugin(self) -> SlackPlugin:
return SlackPlugin()
@responses.activate
def test_simple_notification(self) -> None:
responses.add("POST", "http://example.com/slack")
self.plugin.set_option("webhook", "http://example.com/slack", self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning", "culprit": "foo.bar"},
project_id=self.project.id,
)
group = event.group
assert group is not None
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
self.plugin.notify(notification)
request = responses.calls[0].request
payload = orjson.loads(parse_qs(request.body)["payload"][0])
assert payload == {
"username": "Sentry",
"attachments": [
{
"color": LEVEL_TO_COLOR["warning"],
"fields": [
{"short": False, "value": "foo.bar", "title": "Culprit"},
{"short": True, "value": "bar", "title": "Project"},
],
"fallback": "[bar] Hello world",
"title": "Hello world",
"title_link": group.get_absolute_url(params={"referrer": "slack"}),
}
],
}
@responses.activate
def test_notification_without_culprit(self) -> None:
responses.add("POST", "http://example.com/slack")
self.plugin.set_option("webhook", "http://example.com/slack", self.project)
self.plugin.set_option("exclude_culprit", True, self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning"}, project_id=self.project.id
)
group = event.group
assert group is not None
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
self.plugin.notify(notification)
request = responses.calls[0].request
payload = orjson.loads(parse_qs(request.body)["payload"][0])
assert payload == {
"username": "Sentry",
"attachments": [
{
"color": LEVEL_TO_COLOR["warning"],
"fields": [{"short": True, "value": "bar", "title": "Project"}],
"fallback": "[bar] Hello world",
"title": "Hello world",
"title_link": group.get_absolute_url(params={"referrer": "slack"}),
}
],
}
@responses.activate
def test_notification_without_project(self) -> None:
responses.add("POST", "http://example.com/slack")
self.plugin.set_option("webhook", "http://example.com/slack", self.project)
self.plugin.set_option("exclude_project", True, self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning", "culprit": "foo.bar"},
project_id=self.project.id,
)
group = event.group
assert group is not None
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
self.plugin.notify(notification)
request = responses.calls[0].request
payload = orjson.loads(parse_qs(request.body)["payload"][0])
assert payload == {
"username": "Sentry",
"attachments": [
{
"color": LEVEL_TO_COLOR["warning"],
"fields": [{"short": False, "value": "foo.bar", "title": "Culprit"}],
"fallback": "[bar] Hello world",
"title": "Hello world",
"title_link": group.get_absolute_url(params={"referrer": "slack"}),
}
],
}
@responses.activate
def test_no_error_on_404(self) -> None:
responses.add("POST", "http://example.com/slack", status=404)
self.plugin.set_option("webhook", "http://example.com/slack", self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning", "culprit": "foo.bar"},
project_id=self.project.id,
)
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
# No exception since 404s are supposed to be ignored
self.plugin.notify(notification)
responses.replace("POST", "http://example.com/slack", status=400)
# Other exceptions should not be ignored
with pytest.raises(ApiError):
self.plugin.notify(notification)
@responses.activate
def test_no_error_on_ignorable_slack_errors(self) -> None:
responses.add("POST", "http://example.com/slack", status=403, body="action_prohibited")
self.plugin.set_option("webhook", "http://example.com/slack", self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning", "culprit": "foo.bar"},
project_id=self.project.id,
)
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
# No exception since certain errors are supposed to be ignored
with self.options({"system.url-prefix": "http://example.com"}):
self.plugin.notify(notification)
responses.replace("POST", "http://example.com/slack", status=403, body="some_other_error")
# Other exceptions should not be ignored
with pytest.raises(ApiError):
self.plugin.notify(notification)
| SlackPluginTest |
python | automl__auto-sklearn | autosklearn/ensembles/singlebest_ensemble.py | {
"start": 503,
"end": 5339
} | class ____(AbstractEnsemble):
"""Ensemble consisting of a single model.
Parameters
----------
task_type: int
An identifier indicating which task is being performed.
metrics: Sequence[Scorer] | Scorer
The metrics used to evaluate the models.
backend : Backend
Gives access to the backend of Auto-sklearn. Not used.
random_state: int | RandomState | None = None
Not used.
"""
def __init__(
self,
task_type: int,
metrics: Sequence[Scorer] | Scorer,
backend: Backend,
random_state: int | np.random.RandomState | None = None,
):
self.weights_ = [1.0]
self.task_type = task_type
if isinstance(metrics, Sequence):
self.metrics = metrics
elif isinstance(metrics, Scorer):
self.metrics = [metrics]
else:
raise TypeError(type(metrics))
self.random_state = random_state
self.backend = backend
def fit(
self,
base_models_predictions: np.ndarray | list[np.ndarray],
true_targets: np.ndarray,
model_identifiers: list[tuple[int, int, float]],
runs: Sequence[Run],
X_data: SUPPORTED_FEAT_TYPES | None = None,
) -> AbstractSingleModelEnsemble:
"""Fit the ensemble
Parameters
----------
base_models_predictions: np.ndarray
shape = (n_base_models, n_data_points, n_targets)
n_targets is the number of classes in case of classification,
n_targets is 0 or 1 in case of regression
Can be a list of 2d numpy arrays as well to prevent copying all
predictions into a single, large numpy array.
true_targets : array of shape [n_targets]
model_identifiers : identifier for each base model.
Can be used for practical text output of the ensemble.
runs: Sequence[Run]
Additional information for each run executed by SMAC that was
considered by the ensemble builder.
X_data : list-like | sparse matrix | None = None
Returns
-------
self
"""
return self
def predict(self, predictions: np.ndarray | list[np.ndarray]) -> np.ndarray:
"""Select the predictions of the selected model.
Parameters
----------
base_models_predictions : np.ndarray
shape = (n_base_models, n_data_points, n_targets)
Same as in the fit method.
Returns
-------
np.ndarray
"""
return predictions[0]
def __str__(self) -> str:
return "%s:\n\tMembers: %s" "\n\tWeights: %s\n\tIdentifiers: [%s]" % (
self.__class__.__name__,
self.indices_, # type: ignore [attr-defined]
self.weights_,
self.identifiers_[0], # type: ignore [attr-defined]
)
def get_models_with_weights(
self, models: dict[tuple[int, int, float], BasePipeline]
) -> list[tuple[float, BasePipeline]]:
"""List of (weight, model) pairs for the model selected by this ensemble.
Parameters
----------
models : dict {identifier : model object}
The identifiers are the same as the one presented to the fit()
method. Models can be used for nice printing.
Returns
-------
list[tuple[float, BasePipeline]]
"""
return [(self.weights_[0], models[self.identifiers_[0]])] # type: ignore [attr-defined] # noqa: E501
def get_identifiers_with_weights(
self,
) -> list[tuple[tuple[int, int, float], float]]:
"""Return a (identifier, weight)-pairs for the model selected by this ensemble.
Parameters
----------
models : dict {identifier : model object}
The identifiers are the same as the one presented to the fit()
method. Models can be used for nice printing.
Returns
-------
list[tuple[tuple[int, int, float], float]
"""
return list(zip(self.identifiers_, self.weights_)) # type: ignore [attr-defined] # noqa: E501
def get_selected_model_identifiers(self) -> list[tuple[int, int, float]]:
"""Return identifier of models in the ensemble.
This includes models which have a weight of zero!
Returns
-------
list
"""
return self.identifiers_ # type: ignore [attr-defined]
def get_validation_performance(self) -> float:
"""Return validation performance of ensemble.
In case of multi-objective problem, only the first metric will be returned.
Return
------
float
"""
return self.best_model_score_ # type: ignore [attr-defined]
| AbstractSingleModelEnsemble |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/direct_url.py | {
"start": 4435,
"end": 6576
} | class ____:
url: str
info: InfoType
subdirectory: Optional[str] = None
def _remove_auth_from_netloc(self, netloc: str) -> str:
if "@" not in netloc:
return netloc
user_pass, netloc_no_user_pass = netloc.split("@", 1)
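        # Keep the conventional "git" user on git VCS URLs; it is not a secret.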
if (
isinstance(self.info, VcsInfo)
and self.info.vcs == "git"
and user_pass == "git"
):
return netloc
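        # Keep credentials expressed as environment variables, as allowed by PEP 610.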
if ENV_VAR_RE.match(user_pass):
return netloc
return netloc_no_user_pass
@property
def redacted_url(self) -> str:
"""url with user:password part removed unless it is formed with
environment variables as specified in PEP 610, or it is ``git``
in the case of a git URL.
"""
purl = urllib.parse.urlsplit(self.url)
netloc = self._remove_auth_from_netloc(purl.netloc)
surl = urllib.parse.urlunsplit(
(purl.scheme, netloc, purl.path, purl.query, purl.fragment)
)
return surl
def validate(self) -> None:
self.from_dict(self.to_dict())
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl":
return DirectUrl(
url=_get_required(d, str, "url"),
subdirectory=_get(d, str, "subdirectory"),
info=_exactly_one_of(
[
ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
DirInfo._from_dict(_get(d, dict, "dir_info")),
VcsInfo._from_dict(_get(d, dict, "vcs_info")),
]
),
)
def to_dict(self) -> Dict[str, Any]:
res = _filter_none(
url=self.redacted_url,
subdirectory=self.subdirectory,
)
res[self.info.name] = self.info._to_dict()
return res
@classmethod
def from_json(cls, s: str) -> "DirectUrl":
return cls.from_dict(json.loads(s))
def to_json(self) -> str:
return json.dumps(self.to_dict(), sort_keys=True)
def is_local_editable(self) -> bool:
return isinstance(self.info, DirInfo) and self.info.editable
| DirectUrl |
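A standalone sketch of the redaction rule that `redacted_url` implements above: the user:password part of the netloc is dropped unless it is the literal `git` user of a git URL or an environment-variable placeholder. The `ENV_VAR_RE` pattern below is an assumption; the real one is defined elsewhere in the module.

import re
import urllib.parse

ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")  # assumed pattern

def redact(url: str, is_git: bool = False) -> str:
    # Mirror of _remove_auth_from_netloc + redacted_url for illustration only.
    parts = urllib.parse.urlsplit(url)
    netloc = parts.netloc
    if "@" in netloc:
        user_pass, host = netloc.split("@", 1)
        if not (is_git and user_pass == "git") and not ENV_VAR_RE.match(user_pass):
            netloc = host
    return urllib.parse.urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))

print(redact("https://user:secret@example.com/pkg"))             # https://example.com/pkg
print(redact("ssh://git@github.com/pypa/pip.git", is_git=True))  # kept: literal "git" user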
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 2369,
"end": 2616
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a flow."""
tags: list[str] = Field(
default_factory=list,
description="A list of flow tags",
examples=[["tag-1", "tag-2"]],
)
| FlowUpdate |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 27616,
"end": 34147
} | class ____(PsutilTestCase):
@pytest.mark.skipif(not HAS_CPU_FREQ, reason="not supported")
@pytest.mark.skipif(
AARCH64, reason="aarch64 does not always expose frequency"
)
def test_emulate_use_second_file(self):
# https://github.com/giampaolo/psutil/issues/981
def path_exists_mock(path):
if path.startswith("/sys/devices/system/cpu/cpufreq/policy"):
return False
else:
return orig_exists(path)
orig_exists = os.path.exists
with mock.patch(
"os.path.exists", side_effect=path_exists_mock, create=True
):
assert psutil.cpu_freq()
@pytest.mark.skipif(not HAS_CPU_FREQ, reason="not supported")
@pytest.mark.skipif(
AARCH64 or RISCV64,
reason=f"{platform.machine()} does not report mhz in /proc/cpuinfo",
)
def test_emulate_use_cpuinfo(self):
# Emulate a case where /sys/devices/system/cpu/cpufreq* does not
# exist and /proc/cpuinfo is used instead.
def path_exists_mock(path):
if path.startswith('/sys/devices/system/cpu/'):
return False
else:
return os_path_exists(path)
os_path_exists = os.path.exists
try:
with mock.patch("os.path.exists", side_effect=path_exists_mock):
reload_module(psutil._pslinux)
ret = psutil.cpu_freq()
assert ret, ret
assert ret.max == 0.0
assert ret.min == 0.0
for freq in psutil.cpu_freq(percpu=True):
assert freq.max == 0.0
assert freq.min == 0.0
finally:
reload_module(psutil._pslinux)
reload_module(psutil)
@pytest.mark.skipif(not HAS_CPU_FREQ, reason="not supported")
def test_emulate_data(self):
def open_mock(name, *args, **kwargs):
if name.endswith('/scaling_cur_freq') and name.startswith(
"/sys/devices/system/cpu/cpufreq/policy"
):
return io.BytesIO(b"500000")
elif name.endswith('/scaling_min_freq') and name.startswith(
"/sys/devices/system/cpu/cpufreq/policy"
):
return io.BytesIO(b"600000")
elif name.endswith('/scaling_max_freq') and name.startswith(
"/sys/devices/system/cpu/cpufreq/policy"
):
return io.BytesIO(b"700000")
elif name == '/proc/cpuinfo':
return io.BytesIO(b"cpu MHz : 500")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
with mock.patch("builtins.open", side_effect=open_mock):
with mock.patch('os.path.exists', return_value=True):
freq = psutil.cpu_freq()
assert freq.current == 500.0
# when /proc/cpuinfo is used min and max frequencies are not
# available and are set to 0.
if freq.min != 0.0:
assert freq.min == 600.0
if freq.max != 0.0:
assert freq.max == 700.0
@pytest.mark.skipif(not HAS_CPU_FREQ, reason="not supported")
def test_emulate_multi_cpu(self):
def open_mock(name, *args, **kwargs):
n = name
if n.endswith('/scaling_cur_freq') and n.startswith(
"/sys/devices/system/cpu/cpufreq/policy0"
):
return io.BytesIO(b"100000")
elif n.endswith('/scaling_min_freq') and n.startswith(
"/sys/devices/system/cpu/cpufreq/policy0"
):
return io.BytesIO(b"200000")
elif n.endswith('/scaling_max_freq') and n.startswith(
"/sys/devices/system/cpu/cpufreq/policy0"
):
return io.BytesIO(b"300000")
elif n.endswith('/scaling_cur_freq') and n.startswith(
"/sys/devices/system/cpu/cpufreq/policy1"
):
return io.BytesIO(b"400000")
elif n.endswith('/scaling_min_freq') and n.startswith(
"/sys/devices/system/cpu/cpufreq/policy1"
):
return io.BytesIO(b"500000")
elif n.endswith('/scaling_max_freq') and n.startswith(
"/sys/devices/system/cpu/cpufreq/policy1"
):
return io.BytesIO(b"600000")
elif name == '/proc/cpuinfo':
return io.BytesIO(b"cpu MHz : 100\ncpu MHz : 400")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
with mock.patch("builtins.open", side_effect=open_mock):
with mock.patch('os.path.exists', return_value=True):
with mock.patch(
'psutil._pslinux.cpu_count_logical', return_value=2
):
freq = psutil.cpu_freq(percpu=True)
assert freq[0].current == 100.0
if freq[0].min != 0.0:
assert freq[0].min == 200.0
if freq[0].max != 0.0:
assert freq[0].max == 300.0
assert freq[1].current == 400.0
if freq[1].min != 0.0:
assert freq[1].min == 500.0
if freq[1].max != 0.0:
assert freq[1].max == 600.0
@pytest.mark.skipif(not HAS_CPU_FREQ, reason="not supported")
def test_emulate_no_scaling_cur_freq_file(self):
# See: https://github.com/giampaolo/psutil/issues/1071
def open_mock(name, *args, **kwargs):
if name.endswith('/scaling_cur_freq'):
raise FileNotFoundError
if name.endswith('/cpuinfo_cur_freq'):
return io.BytesIO(b"200000")
elif name == '/proc/cpuinfo':
return io.BytesIO(b"cpu MHz : 200")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
with mock.patch("builtins.open", side_effect=open_mock):
with mock.patch('os.path.exists', return_value=True):
with mock.patch(
'psutil._pslinux.cpu_count_logical', return_value=1
):
freq = psutil.cpu_freq()
assert freq.current == 200
@pytest.mark.skipif(not LINUX, reason="LINUX only")
| TestSystemCPUFrequency |
python | networkx__networkx | networkx/algorithms/assortativity/tests/test_connectivity.py | {
"start": 75,
"end": 4978
} | class ____:
def test_degree_p4(self):
G = nx.path_graph(4)
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.5}
nd = nx.average_degree_connectivity(D)
assert nd == answer
answer = {1: 2.0, 2: 1.5}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source="in", target="in")
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source="in", target="in")
assert nd == answer
def test_degree_p4_weighted(self):
G = nx.path_graph(4)
G[1][2]["weight"] = 4
answer = {1: 2.0, 2: 1.8}
nd = nx.average_degree_connectivity(G, weight="weight")
assert nd == answer
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.8}
nd = nx.average_degree_connectivity(D, weight="weight")
assert nd == answer
answer = {1: 2.0, 2: 1.8}
D = G.to_directed()
nd = nx.average_degree_connectivity(
D, weight="weight", source="in", target="in"
)
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(
D, source="in", target="out", weight="weight"
)
assert nd == answer
def test_weight_keyword(self):
G = nx.path_graph(4)
G[1][2]["other"] = 4
answer = {1: 2.0, 2: 1.8}
nd = nx.average_degree_connectivity(G, weight="other")
assert nd == answer
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G, weight=None)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.8}
nd = nx.average_degree_connectivity(D, weight="other")
assert nd == answer
answer = {1: 2.0, 2: 1.8}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in")
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in")
assert nd == answer
def test_degree_barrat(self):
G = nx.star_graph(5)
G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)])
G[0][5]["weight"] = 5
nd = nx.average_degree_connectivity(G)[5]
assert nd == 1.8
nd = nx.average_degree_connectivity(G, weight="weight")[5]
assert nd == pytest.approx(3.222222, abs=1e-5)
def test_zero_deg(self):
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(1, 3)
G.add_edge(1, 4)
c = nx.average_degree_connectivity(G)
assert c == {1: 0, 3: 1}
c = nx.average_degree_connectivity(G, source="in", target="in")
assert c == {0: 0, 1: 0}
c = nx.average_degree_connectivity(G, source="in", target="out")
assert c == {0: 0, 1: 3}
c = nx.average_degree_connectivity(G, source="in", target="in+out")
assert c == {0: 0, 1: 3}
c = nx.average_degree_connectivity(G, source="out", target="out")
assert c == {0: 0, 3: 0}
c = nx.average_degree_connectivity(G, source="out", target="in")
assert c == {0: 0, 3: 1}
c = nx.average_degree_connectivity(G, source="out", target="in+out")
assert c == {0: 0, 3: 1}
def test_in_out_weight(self):
G = nx.DiGraph()
G.add_edge(1, 2, weight=1)
G.add_edge(1, 3, weight=1)
G.add_edge(3, 1, weight=1)
for s, t in permutations(["in", "out", "in+out"], 2):
c = nx.average_degree_connectivity(G, source=s, target=t)
cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight")
assert c == cw
def test_invalid_source(self):
with pytest.raises(nx.NetworkXError):
G = nx.DiGraph()
nx.average_degree_connectivity(G, source="bogus")
def test_invalid_target(self):
with pytest.raises(nx.NetworkXError):
G = nx.DiGraph()
nx.average_degree_connectivity(G, target="bogus")
def test_invalid_undirected_graph(self):
G = nx.Graph()
with pytest.raises(nx.NetworkXError):
nx.average_degree_connectivity(G, target="bogus")
with pytest.raises(nx.NetworkXError):
nx.average_degree_connectivity(G, source="bogus")
def test_single_node(self):
# TODO Is this really the intended behavior for providing a
# single node as the argument `nodes`? Shouldn't the function
# just return the connectivity value itself?
G = nx.trivial_graph()
conn = nx.average_degree_connectivity(G, nodes=0)
assert conn == {0: 0}
| TestNeighborConnectivity |
python | python-pillow__Pillow | src/PIL/Image.py | {
"start": 3350,
"end": 3560
} | class ____(IntEnum):
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
TRANSVERSE = 6
# transforms (also defined in Imaging.h)
| Transpose |
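These enum members are the values accepted by `Image.transpose()`. A small usage sketch, assuming Pillow is installed:

from PIL import Image

im = Image.new("RGB", (4, 2))
rotated = im.transpose(Image.Transpose.ROTATE_90)
print(rotated.size)  # (2, 4): width and height swap for a 90-degree rotation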
python | kamyu104__LeetCode-Solutions | Python/count-largest-group.py | {
"start": 54,
"end": 403
} | class ____(object):
def countLargestGroup(self, n):
"""
:type n: int
:rtype: int
"""
count = collections.Counter()
for x in xrange(1, n+1):
count[sum(map(int, str(x)))] += 1
max_count = max(count.itervalues())
return sum(v == max_count for v in count.itervalues())
| Solution |
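A worked example helps here: for n = 13 the digit-sum groups are {1, 10}, {2, 11}, {3, 12}, {4, 13} plus the singletons 5-9, so the largest group size is 2 and four groups reach it. The excerpt targets Python 2 (`xrange`, `itervalues`); the equivalent Python 3 sketch below checks that count.

import collections

def count_largest_group(n):
    # Group 1..n by digit sum and count how many groups reach the maximum size.
    count = collections.Counter(sum(map(int, str(x))) for x in range(1, n + 1))
    max_count = max(count.values())
    return sum(v == max_count for v in count.values())

print(count_largest_group(13))  # 4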
python | openai__openai-python | src/openai/types/beta/realtime/session_updated_event.py | {
"start": 226,
"end": 489
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
session: Session
"""Realtime session object configuration."""
type: Literal["session.updated"]
"""The event type, must be `session.updated`."""
| SessionUpdatedEvent |
python | ray-project__ray | python/ray/experimental/channel/common.py | {
"start": 3918,
"end": 5625
} | class ____:
serialization_context = _SerializationContext()
_torch_available: Optional[bool] = None
_torch_device: Optional["torch.device"] = None
_current_stream: Optional["torch.cuda.Stream"] = None
def __init__(self):
# Used for the torch.Tensor accelerator transport.
self.communicators: Dict[str, "Communicator"] = {}
# Used for driver process to store actors in the communicator.
self.communicator_handles: Dict[str, "CommunicatorHandle"] = {}
@staticmethod
def get_current() -> "ChannelContext":
"""Get or create a singleton context.
If the context has not yet been created in this process, it will be
initialized with default settings.
"""
global _default_context
with _context_lock:
if _default_context is None:
_default_context = ChannelContext()
return _default_context
@property
def torch_available(self) -> bool:
"""
Check if torch package is available.
"""
if self._torch_available is not None:
return self._torch_available
try:
import torch # noqa: F401
except ImportError:
self._torch_available = False
return False
self._torch_available = True
return True
@property
def torch_device(self) -> "torch.device":
if self._torch_device is None:
self._torch_device = AcceleratorContext.get().get_accelerator_devices()[0]
return self._torch_device
def set_torch_device(self, device: "torch.device"):
self._torch_device = device
@PublicAPI(stability="alpha")
| ChannelContext |
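`get_current()` relies on a module-level `_default_context` and `_context_lock` that are defined outside the excerpt. A standalone sketch of that lazy, lock-protected singleton pattern (names simplified, not Ray code):

import threading

_default_context = None
_context_lock = threading.Lock()

class Context:
    pass

def get_current() -> Context:
    # Create the module-level instance on first access, under a lock,
    # so every caller in the process shares the same object.
    global _default_context
    with _context_lock:
        if _default_context is None:
            _default_context = Context()
        return _default_context

assert get_current() is get_current()  # same instance every time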
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py | {
"start": 107639,
"end": 112519
} | class ____(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_path = self._createVocabFile("i2f_vocab1.txt")
# vocabulary_file supports string and tensor
type_funcs = [str, constant_op.constant]
for type_func in type_funcs:
vocabulary_file = type_func(vocabulary_path)
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_index_to_string_table_from_multicolumn_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
value_column_index=0)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_index_to_string_table_from_multicolumn_file_custom_delimiter(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
value_column_index=0,
delimiter=" ")
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
self.evaluate(features))
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", b"surgery", default_value),
self.evaluate(features))
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", default_value, default_value),
self.evaluate(features))
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Invalid vocab_size"):
_ = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.evaluate(lookup_ops.tables_initializer())
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
table = lookup_ops.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(features)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((b"salad", b"surgery", b"UNK"), self.evaluate(features))
| IndexToStringTableFromFileTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsNone1.py | {
"start": 1405,
"end": 2040
} | class ____(Protocol):
def __bool__(self) -> Literal[False]: ...
def func7(x: NoneProto | None):
if x is None:
reveal_type(x, expected_text="None")
else:
reveal_type(x, expected_text="NoneProto")
_T3 = TypeVar("_T3", bound=None | int)
def func8(x: _T3) -> _T3:
if x is None:
reveal_type(x, expected_text="None*")
else:
reveal_type(x, expected_text="int*")
return x
_T4 = TypeVar("_T4")
def func9(value: type[_T4] | None):
if value is None:
reveal_type(value, expected_text="None")
else:
reveal_type(value, expected_text="type[_T4@func9]")
| NoneProto |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 71630,
"end": 72377
} | class ____(FieldValues):
"""
Values for `FileField`.
"""
valid_inputs = [
(MockFile(name='example', size=10), MockFile(name='example', size=10))
]
invalid_inputs = [
('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
(MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
(MockFile(name='', size=10), ['No filename could be determined.']),
(MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
]
outputs = [
(MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
('', None)
]
field = serializers.FileField(max_length=10)
| TestFileField |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 10330,
"end": 10429
} | class ____(ArrayRHSMixin, lookups.ContainedBy):
pass
@ArrayField.register_lookup
| ArrayContainedBy |
python | huggingface__transformers | tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py | {
"start": 8612,
"end": 10725
} | class ____(VisionTextDualEncoderMixin, unittest.TestCase):
def get_pretrained_model_and_inputs(self):
model = VisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert"
)
batch_size = 13
pixel_values = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
]
)
input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
attention_mask = random_attention_mask([batch_size, 4])
inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def get_vision_text_model(self, vision_config, text_config):
vision_model = ViTModel(vision_config).eval()
text_model = BertModel(text_config).eval()
return vision_model, text_model
def prepare_config_and_inputs(self):
vit_model_tester = ViTModelTester(self)
bert_model_tester = BertModelTester(self)
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
vision_config, pixel_values, _ = vision_config_and_inputs
(
text_config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_torch
| ViTBertModelTest |
python | scikit-learn__scikit-learn | sklearn/cross_decomposition/_pls.py | {
"start": 30685,
"end": 37008
} | class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Partial Least Square SVD.
This transformer simply performs a SVD on the cross-covariance matrix
`X'y`. It is able to project both the training data `X` and the targets
`y`. The training data `X` is projected on the left singular vectors, while
the targets are projected on the right singular vectors.
Read more in the :ref:`User Guide <cross_decomposition>`.
.. versionadded:: 0.8
Parameters
----------
n_components : int, default=2
The number of components to keep. Should be in `[1,
min(n_samples, n_features, n_targets)]`.
scale : bool, default=True
Whether to scale `X` and `y`.
copy : bool, default=True
Whether to copy `X` and `y` in fit before applying centering, and
potentially scaling. If `False`, these operations will be done inplace,
modifying both arrays.
Attributes
----------
x_weights_ : ndarray of shape (n_features, n_components)
The left singular vectors of the SVD of the cross-covariance matrix.
Used to project `X` in :meth:`transform`.
y_weights_ : ndarray of shape (n_targets, n_components)
The right singular vectors of the SVD of the cross-covariance matrix.
Used to project `y` in :meth:`transform`.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
PLSCanonical : Partial Least Squares transformer and regressor.
CCA : Canonical Correlation Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_decomposition import PLSSVD
>>> X = np.array([[0., 0., 1.],
... [1., 0., 0.],
... [2., 2., 2.],
... [2., 5., 4.]])
>>> y = np.array([[0.1, -0.2],
... [0.9, 1.1],
... [6.2, 5.9],
... [11.9, 12.3]])
>>> pls = PLSSVD(n_components=2).fit(X, y)
>>> X_c, y_c = pls.transform(X, y)
>>> X_c.shape, y_c.shape
((4, 2), (4, 2))
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"scale": ["boolean"],
"copy": ["boolean"],
}
def __init__(self, n_components=2, *, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y):
"""Fit model to data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training samples.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Targets.
Returns
-------
self : object
Fitted estimator.
"""
check_consistent_length(X, y)
X = validate_data(
self,
X,
dtype=np.float64,
force_writeable=True,
copy=self.copy,
ensure_min_samples=2,
)
y = check_array(
y,
input_name="y",
dtype=np.float64,
force_writeable=True,
copy=self.copy,
ensure_2d=False,
)
if y.ndim == 1:
y = y.reshape(-1, 1)
# we'll compute the SVD of the cross-covariance matrix = X.T.dot(y)
# This matrix rank is at most min(n_samples, n_features, n_targets) so
# n_components cannot be bigger than that.
n_components = self.n_components
rank_upper_bound = min(X.shape[0], X.shape[1], y.shape[1])
if n_components > rank_upper_bound:
raise ValueError(
f"`n_components` upper bound is {rank_upper_bound}. "
f"Got {n_components} instead. Reduce `n_components`."
)
X, y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
X, y, self.scale
)
# Compute SVD of cross-covariance matrix
C = np.dot(X.T, y)
U, s, Vt = svd(C, full_matrices=False)
U = U[:, :n_components]
Vt = Vt[:n_components]
U, Vt = svd_flip(U, Vt)
V = Vt.T
self.x_weights_ = U
self.y_weights_ = V
self._n_features_out = self.x_weights_.shape[1]
return self
def transform(self, X, y=None):
"""
Apply the dimensionality reduction.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to be transformed.
y : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Targets.
Returns
-------
x_scores : array-like or tuple of array-like
The transformed data `X_transformed` if `y is not None`,
`(X_transformed, y_transformed)` otherwise.
"""
check_is_fitted(self)
X = validate_data(self, X, dtype=np.float64, reset=False)
Xr = (X - self._x_mean) / self._x_std
x_scores = np.dot(Xr, self.x_weights_)
if y is not None:
y = check_array(y, input_name="y", ensure_2d=False, dtype=np.float64)
if y.ndim == 1:
y = y.reshape(-1, 1)
yr = (y - self._y_mean) / self._y_std
y_scores = np.dot(yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None):
"""Learn and apply the dimensionality reduction.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training samples.
y : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Targets.
Returns
-------
out : array-like or tuple of array-like
The transformed data `X_transformed` if `y is not None`,
`(X_transformed, y_transformed)` otherwise.
"""
return self.fit(X, y).transform(X, y)
| PLSSVD |
python | networkx__networkx | networkx/algorithms/tests/test_summarization.py | {
"start": 4884,
"end": 7970
} | class ____:
def build_original_graph(self):
"""
Builds graph shown in the original research paper
"""
original_matrix = [
("1", "CB"),
("2", "ABC"),
("3", ["A", "B", "6"]),
("4", "ABC"),
("5", "AB"),
("6", ["5"]),
("A", ["6"]),
]
graph = nx.Graph()
for source, targets in original_matrix:
for target in targets:
graph.add_edge(source, target)
return graph
def test_empty(self):
"""
Verify that an empty undirected graph results in no compressor nodes
"""
G = nx.Graph()
compressed_G, c_nodes = nx.dedensify(G, threshold=2)
assert c_nodes == set()
def setup_method(self):
self.c_nodes = ("6AB", "ABC")
def build_compressed_graph(self):
compressed_matrix = [
("1", ["B", "C"]),
("2", ["ABC"]),
("3", ["6AB"]),
("4", ["ABC"]),
("5", ["6AB"]),
("6", ["6AB", "A"]),
("A", ["6AB", "ABC"]),
("B", ["ABC", "6AB"]),
("C", ["ABC"]),
]
compressed_graph = nx.Graph()
for source, targets in compressed_matrix:
for target in targets:
compressed_graph.add_edge(source, target)
return compressed_graph
def test_dedensify_edges(self):
"""
Verifies that dedensify produced correct compressor nodes and the
correct edges to/from the compressor nodes in an undirected graph
"""
G = self.build_original_graph()
c_G, c_nodes = nx.dedensify(G, threshold=2)
v_compressed_G = self.build_compressed_graph()
for s, t in c_G.edges():
o_s = "".join(sorted(s))
o_t = "".join(sorted(t))
has_compressed_edge = c_G.has_edge(s, t)
verified_has_compressed_edge = v_compressed_G.has_edge(o_s, o_t)
assert has_compressed_edge == verified_has_compressed_edge
assert len(c_nodes) == len(self.c_nodes)
def test_dedensify_edge_count(self):
"""
Verifies that dedensify produced the correct number of edges in an
undirected graph
"""
G = self.build_original_graph()
c_G, c_nodes = nx.dedensify(G, threshold=2, copy=True)
compressed_edge_count = len(c_G.edges())
verified_original_edge_count = len(G.edges())
assert compressed_edge_count <= verified_original_edge_count
verified_compressed_G = self.build_compressed_graph()
verified_compressed_edge_count = len(verified_compressed_G.edges())
assert compressed_edge_count == verified_compressed_edge_count
@pytest.mark.parametrize(
"graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
)
def test_summarization_empty(graph_type):
G = graph_type()
summary_graph = nx.snap_aggregation(G, node_attributes=("color",))
assert nx.is_isomorphic(summary_graph, G)
| TestUnDirectedDedensification |
python | numba__numba | numba/tests/test_ir_inlining.py | {
"start": 1330,
"end": 4686
} | class ____(TestCase):
_DEBUG = False
inline_opt_as_bool = {'always': True, 'never': False}
# --------------------------------------------------------------------------
# Example cost model
def sentinel_17_cost_model(self, func_ir):
# sentinel 17 cost model, this is a fake cost model that will return
# True (i.e. inline) if the ir.FreeVar(17) is found in the func_ir,
for blk in func_ir.blocks.values():
for stmt in blk.body:
if isinstance(stmt, ir.Assign):
if isinstance(stmt.value, ir.FreeVar):
if stmt.value.value == 17:
return True
return False
# --------------------------------------------------------------------------
def check(self, test_impl, *args, **kwargs):
inline_expect = kwargs.pop('inline_expect', None)
assert inline_expect
block_count = kwargs.pop('block_count', 1)
assert not kwargs
for k, v in inline_expect.items():
assert isinstance(k, str)
assert isinstance(v, bool)
j_func = njit(pipeline_class=IRPreservingTestPipeline)(test_impl)
# check they produce the same answer first!
self.assertEqual(test_impl(*args), j_func(*args))
# make sure IR doesn't have branches
fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir']
fir.blocks = ir_utils.simplify_CFG(fir.blocks)
if self._DEBUG:
print("FIR".center(80, "-"))
fir.dump()
if block_count != 'SKIP':
self.assertEqual(len(fir.blocks), block_count)
block = next(iter(fir.blocks.values()))
# if we don't expect the function to be inlined then make sure there is
# 'call' present still
exprs = [x for x in block.find_exprs()]
assert exprs
for k, v in inline_expect.items():
found = False
for expr in exprs:
if getattr(expr, 'op', False) == 'call':
func_defn = fir.get_definition(expr.func)
found |= func_defn.name == k
elif ir_utils.is_operator_or_getitem(expr):
found |= expr.fn.__name__ == k
self.assertFalse(found == v)
return fir # for use in further analysis
# used in _gen_involved
_GLOBAL = 1234
def _gen_involved():
_FREEVAR = 0xCAFE
def foo(a, b, c=12, d=1j, e=None):
f = a + b
a += _FREEVAR
g = np.zeros(c, dtype=np.complex64)
h = f + g
i = 1j / d
# For SSA, zero init, n and t
n = 0
t = 0
if np.abs(i) > 0:
k = h / i
l = np.arange(1, c + 1)
m = np.sqrt(l - g) + e * k
if np.abs(m[0]) < 1:
for o in range(a):
n += 0
if np.abs(n) < 3:
break
n += m[2]
p = g / l
q = []
for r in range(len(p)):
q.append(p[r])
if r > 4 + 1:
s = 123
t = 5
if s > 122 - c:
t += s
t += q[0] + _GLOBAL
return f + o + r + t + r + a + n
return foo
| InliningBase |
python | dask__distributed | distributed/deploy/old_ssh.py | {
"start": 10080,
"end": 15189
} | class ____:
def __init__(
self,
scheduler_addr,
scheduler_port,
worker_addrs,
nthreads=0,
n_workers=None,
ssh_username=None,
ssh_port=22,
ssh_private_key=None,
nohost=False,
logdir=None,
remote_python=None,
memory_limit=None,
worker_port=None,
nanny_port=None,
remote_dask_worker="distributed.cli.dask_worker",
local_directory=None,
**kwargs,
):
self.scheduler_addr = scheduler_addr
self.scheduler_port = scheduler_port
self.nthreads = nthreads
nprocs = kwargs.pop("nprocs", None)
if kwargs:
raise TypeError(
f"__init__() got an unexpected keyword argument {', '.join(kwargs.keys())}"
)
if nprocs is not None and n_workers is not None:
raise ValueError(
"Both nprocs and n_workers were specified. Use n_workers only."
)
elif nprocs is not None:
warnings.warn(
"The nprocs argument will be removed in a future release. It has been "
"renamed to n_workers.",
FutureWarning,
)
n_workers = nprocs
elif n_workers is None:
n_workers = 1
self.n_workers = n_workers
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.ssh_private_key = ssh_private_key
self.nohost = nohost
self.remote_python = remote_python
self.memory_limit = memory_limit
self.worker_port = worker_port
self.nanny_port = nanny_port
self.remote_dask_worker = remote_dask_worker
self.local_directory = local_directory
# Generate a universal timestamp to use for log files
import datetime
if logdir is not None:
logdir = os.path.join(
logdir,
"dask-ssh_" + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
)
print(
bcolors.WARNING
+ f'Output will be redirected to logfiles stored locally on individual worker nodes under "{logdir}".'
+ bcolors.ENDC
)
self.logdir = logdir
# Keep track of all running threads
self.threads = []
# Start the scheduler node
self.scheduler = start_scheduler(
logdir,
scheduler_addr,
scheduler_port,
ssh_username,
ssh_port,
ssh_private_key,
remote_python,
)
# Start worker nodes
self.workers = []
for addr in worker_addrs:
self.add_worker(addr)
@gen.coroutine
def _start(self):
pass
@property
def nprocs(self):
warnings.warn(
"The nprocs attribute will be removed in a future release. It has been "
"renamed to n_workers.",
FutureWarning,
)
return self.n_workers
@nprocs.setter
def nprocs(self, value):
warnings.warn(
"The nprocs attribute will be removed in a future release. It has been "
"renamed to n_workers.",
FutureWarning,
)
self.n_workers = value
@property
def scheduler_address(self):
return "%s:%d" % (self.scheduler_addr, self.scheduler_port)
def monitor_remote_processes(self):
# Form a list containing all processes, since we treat them equally from here on out.
all_processes = [self.scheduler] + self.workers
try:
while True:
for process in all_processes:
while not process["output_queue"].empty():
print(process["output_queue"].get())
# Kill some time and free up CPU before starting the next sweep
# through the processes.
sleep(0.1)
# end while true
except KeyboardInterrupt:
pass # Return execution to the calling process
def add_worker(self, address):
self.workers.append(
start_worker(
self.logdir,
self.scheduler_addr,
self.scheduler_port,
address,
self.nthreads,
self.n_workers,
self.ssh_username,
self.ssh_port,
self.ssh_private_key,
self.nohost,
self.memory_limit,
self.worker_port,
self.nanny_port,
self.remote_python,
self.remote_dask_worker,
self.local_directory,
)
)
def shutdown(self):
all_processes = [self.scheduler] + self.workers
for process in all_processes:
process["input_queue"].put("shutdown")
process["thread"].join()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.shutdown()
| SSHCluster |
python | django__django | tests/update/models.py | {
"start": 876,
"end": 1181
} | class ____(models.Model):
foo = models.ForeignKey(Foo, models.CASCADE, to_field="target")
o2o_foo = models.OneToOneField(
Foo, models.CASCADE, related_name="o2o_bar", null=True
)
m2m_foo = models.ManyToManyField(Foo, related_name="m2m_foo")
x = models.IntegerField(default=0)
| Bar |
python | kamyu104__LeetCode-Solutions | Python/longest-palindromic-subsequence.py | {
"start": 31,
"end": 607
} | class ____(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
if s == s[::-1]: # optional, to optimize special case
return len(s)
dp = [[1] * len(s) for _ in xrange(2)]
for i in reversed(xrange(len(s))):
for j in xrange(i+1, len(s)):
if s[i] == s[j]:
dp[i%2][j] = 2 + dp[(i+1)%2][j-1] if i+1 <= j-1 else 2
else:
dp[i%2][j] = max(dp[(i+1)%2][j], dp[i%2][j-1])
return dp[0][-1]
| Solution |
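Worked example: for s = "bbbab" the longest palindromic subsequence is "bbbb", length 4. The excerpt is Python 2 (`xrange`); the Python 3 restatement below keeps the same two-row DP, where `dp[i % 2][j]` holds the answer for the substring `s[i..j]`.

def longest_palindrome_subseq(s):
    if s == s[::-1]:  # optional shortcut for palindromic input
        return len(s)
    dp = [[1] * len(s) for _ in range(2)]
    for i in reversed(range(len(s))):
        for j in range(i + 1, len(s)):
            if s[i] == s[j]:
                dp[i % 2][j] = 2 + dp[(i + 1) % 2][j - 1] if i + 1 <= j - 1 else 2
            else:
                dp[i % 2][j] = max(dp[(i + 1) % 2][j], dp[i % 2][j - 1])
    return dp[0][-1]

print(longest_palindrome_subseq("bbbab"))  # 4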
python | huggingface__transformers | src/transformers/models/falcon/modeling_falcon.py | {
"start": 4414,
"end": 9756
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: FalconConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[FalconConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
batch_size, seq_length = attention_mask.shape
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
base = torch.tensor(
2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
)
powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != num_heads:
extra_base = torch.tensor(
2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
)
num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
# Note: alibi will added to the attention bias that will be applied to the query, key product of attention
# => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
# => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
# => the query_length dimension will then be broadcasted correctly
# This is more or less identical to T5's relative position bias:
# https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
alibi = slopes[..., None].bfloat16() * arange_tensor
return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
# Copied from transformers.models.bloom.modeling_bloom.dropout_add
def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
"""
Dropout add function
Args:
x (`torch.tensor`):
input tensor
residual (`torch.tensor`):
residual tensor
prob (`float`):
dropout probability
training (`bool`):
training mode
"""
out = F.dropout(x, p=prob, training=training)
out = residual + out
return out
| FalconRotaryEmbedding |
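`compute_default_rope_parameters` builds inverse frequencies inv_freq[i] = 1 / base**(2*i/dim). A small NumPy check of that formula and of the resulting cos/sin table shapes; NumPy is used only to keep the sketch independent of torch, so it mirrors rather than reproduces the excerpt.

import numpy as np

base, dim, seq_len = 10000.0, 8, 4
inv_freq = 1.0 / base ** (np.arange(0, dim, 2, dtype=np.float64) / dim)  # shape (dim // 2,)
positions = np.arange(seq_len, dtype=np.float64)
freqs = np.outer(positions, inv_freq)                                    # (seq_len, dim // 2)
emb = np.concatenate([freqs, freqs], axis=-1)                            # (seq_len, dim)
print(inv_freq)           # [1.0, 0.1, 0.01, 0.001] for base=10000, dim=8
print(np.cos(emb).shape)  # (4, 8) -- the cos/sin tables fed to attention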
python | pennersr__django-allauth | allauth/socialaccount/views.py | {
"start": 883,
"end": 2454
} | class ____(
RedirectAuthenticatedUserMixin,
CloseableSignupMixin,
AjaxCapableProcessFormViewMixin,
FormView,
):
form_class = SignupForm
template_name = "socialaccount/signup." + account_settings.TEMPLATE_EXTENSION
def get_form_class(self):
return get_form_class(app_settings.FORMS, "signup", self.form_class)
@method_decorator(login_not_required)
def dispatch(self, request, *args, **kwargs):
self.sociallogin = flows.signup.get_pending_signup(request)
if not self.sociallogin:
return HttpResponseRedirect(reverse("account_login"))
return super(SignupView, self).dispatch(request, *args, **kwargs)
def is_open(self):
return get_adapter(self.request).is_open_for_signup(
self.request, self.sociallogin
)
def get_form_kwargs(self):
ret = super(SignupView, self).get_form_kwargs()
ret["sociallogin"] = self.sociallogin
return ret
def form_valid(self, form):
return flows.signup.signup_by_form(self.request, self.sociallogin, form)
def get_context_data(self, **kwargs):
ret = super(SignupView, self).get_context_data(**kwargs)
ret.update(
dict(
site=get_current_site(self.request),
account=self.sociallogin.account,
)
)
return ret
def get_authenticated_redirect_url(self):
return reverse("socialaccount_connections")
signup = SignupView.as_view()
@method_decorator(login_not_required, name="dispatch")
| SignupView |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/benchmarks/csv_dataset_benchmark.py | {
"start": 1115,
"end": 4788
} | class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.experimental.CsvDataset`."""
FLOAT_VAL = '1.23456E12'
STR_VAL = string.ascii_letters * 10
def _set_up(self, str_val):
# Since this isn't test.TestCase, have to manually create a test dir
gfile.MakeDirs(googletest.GetTempDir())
self._temp_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
self._num_cols = [4, 64, 256]
self._num_per_iter = 5000
self._filenames = []
for n in self._num_cols:
fn = os.path.join(self._temp_dir, 'file%d.csv' % n)
with open(fn, 'w') as f:
# Just write 100 rows and use `repeat`... Assumes the cost
# of creating an iterator is not significant
row = ','.join(str_val for _ in range(n))
f.write('\n'.join(row for _ in range(100)))
self._filenames.append(fn)
def _tear_down(self):
gfile.DeleteRecursively(self._temp_dir)
def _run_benchmark(self, dataset, num_cols, prefix, benchmark_id):
self.run_and_report_benchmark(
dataset=dataset,
num_elements=self._num_per_iter,
name='%s_with_cols_%d' % (prefix, num_cols),
iters=10,
extras={
'model_name': 'csv.benchmark.%d' % benchmark_id,
'parameters': '%d' % num_cols,
},
warmup=True)
def benchmark_map_with_floats(self):
self._set_up(self.FLOAT_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [[0.0]] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop
self._run_benchmark(
dataset=dataset,
num_cols=num_cols,
prefix='csv_float_map_decode_csv',
benchmark_id=1)
self._tear_down()
def benchmark_map_with_strings(self):
self._set_up(self.STR_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [['']] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop
self._run_benchmark(
dataset=dataset,
num_cols=num_cols,
prefix='csv_strings_map_decode_csv',
benchmark_id=2)
self._tear_down()
def benchmark_csv_dataset_with_floats(self):
self._set_up(self.FLOAT_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [[0.0]] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat() # pylint: disable=cell-var-from-loop
self._run_benchmark(
dataset=dataset,
num_cols=num_cols,
prefix='csv_float_fused_dataset',
benchmark_id=3)
self._tear_down()
def benchmark_csv_dataset_with_strings(self):
self._set_up(self.STR_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [['']] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat() # pylint: disable=cell-var-from-loop
self._run_benchmark(
dataset=dataset,
num_cols=num_cols,
prefix='csv_strings_fused_dataset',
benchmark_id=4)
self._tear_down()
if __name__ == '__main__':
benchmark_base.test.main()
| CsvDatasetBenchmark |
python | wandb__wandb | wandb/automations/_utils.py | {
"start": 1990,
"end": 2465
} | class ____(Protocol):
id: str
def extract_id(obj: HasId | str) -> str:
return obj.id if hasattr(obj, "id") else obj
# ---------------------------------------------------------------------------
ACTION_CONFIG_KEYS: dict[ActionType, str] = {
ActionType.NOTIFICATION: "notification_action_input",
ActionType.GENERIC_WEBHOOK: "generic_webhook_action_input",
ActionType.NO_OP: "no_op_action_input",
ActionType.QUEUE_JOB: "queue_job_action_input",
}
| HasId |
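`extract_id` accepts either an object exposing an `id` attribute or a bare string id. A runnable restatement with a tiny check; the `Automation` dataclass is an illustrative stand-in, not a wandb class.

from dataclasses import dataclass

def extract_id(obj):
    # Restated from the excerpt so this snippet runs on its own.
    return obj.id if hasattr(obj, "id") else obj

@dataclass
class Automation:
    id: str

print(extract_id(Automation(id="auto-123")))  # "auto-123"
print(extract_id("auto-456"))                 # "auto-456"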
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_indexing.py | {
"start": 3851,
"end": 10237
} | class ____:
def test_getitem_slice(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
assert sliced == "d"
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(sliced, expected)
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
c = Categorical(
np.random.default_rng(2).integers(0, 5, size=150000).astype(np.int8)
)
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
tm.assert_numpy_array_equal(result, expected)
def test_periodindex(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"],
freq="M",
)
cat1 = Categorical(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
tm.assert_numpy_array_equal(cat1._codes, exp_arr)
tm.assert_index_equal(cat1.categories, exp_idx)
idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"],
freq="M",
)
cat2 = Categorical(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
exp_idx2 = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
tm.assert_numpy_array_equal(cat2._codes, exp_arr)
tm.assert_index_equal(cat2.categories, exp_idx2)
idx3 = PeriodIndex(
[
"2013-12",
"2013-11",
"2013-10",
"2013-09",
"2013-08",
"2013-07",
"2013-05",
],
freq="M",
)
cat3 = Categorical(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
exp_idx = PeriodIndex(
[
"2013-05",
"2013-07",
"2013-08",
"2013-09",
"2013-10",
"2013-11",
"2013-12",
],
freq="M",
)
tm.assert_numpy_array_equal(cat3._codes, exp_arr)
tm.assert_index_equal(cat3.categories, exp_idx)
@pytest.mark.parametrize(
"null_val",
[None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"],
)
def test_periodindex_on_null_types(self, null_val):
# GH 46673
result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D")
expected = PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]")
assert result[2] is NaT
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_categories_assignments_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items "
"as the old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
# Combinations of sorted/unique:
@pytest.mark.parametrize(
"idx_values", [[1, 2, 3, 4], [1, 3, 2, 4], [1, 3, 3, 4], [1, 2, 2, 4]]
)
# Combinations of missing/unique
@pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]])
@pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex])
@pytest.mark.parametrize("dtype", [None, "category", "key"])
def test_get_indexer_non_unique(self, idx_values, key_values, key_class, dtype):
# GH 21448
key = key_class(key_values, categories=range(1, 6))
if dtype == "key":
dtype = key.dtype
# Test for flat index and CategoricalIndex with same/different cats:
idx = Index(idx_values, dtype=dtype)
expected, exp_miss = idx.get_indexer_non_unique(key_values)
result, res_miss = idx.get_indexer_non_unique(key)
tm.assert_numpy_array_equal(expected, result)
tm.assert_numpy_array_equal(exp_miss, res_miss)
exp_unique = idx.unique().get_indexer(key_values)
res_unique = idx.unique().get_indexer(key)
tm.assert_numpy_array_equal(res_unique, exp_unique)
def test_where_unobserved_nan(self):
ser = Series(Categorical(["a", "b"]))
result = ser.where([True, False])
expected = Series(Categorical(["a", None], categories=["a", "b"]))
tm.assert_series_equal(result, expected)
# all NA
ser = Series(Categorical(["a", "b"]))
result = ser.where([False, False])
expected = Series(Categorical([None, None], categories=["a", "b"]))
tm.assert_series_equal(result, expected)
def test_where_unobserved_categories(self):
ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
result = ser.where([True, True, False], other="b")
expected = Series(Categorical(["a", "b", "b"], categories=ser.cat.categories))
tm.assert_series_equal(result, expected)
def test_where_other_categorical(self):
ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
other = Categorical(["b", "c", "a"], categories=["a", "c", "b", "d"])
result = ser.where([True, False, True], other)
expected = Series(Categorical(["a", "c", "c"], dtype=ser.dtype))
tm.assert_series_equal(result, expected)
def test_where_new_category_raises(self):
ser = Series(Categorical(["a", "b", "c"]))
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
ser.where([True, False, True], "d")
def test_where_ordered_differs_raises(self):
ser = Series(
Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"], ordered=True)
)
other = Categorical(
["b", "c", "a"], categories=["a", "c", "b", "d"], ordered=True
)
with pytest.raises(TypeError, match="without identical categories"):
ser.where([True, False, True], other)
| TestCategoricalIndexing |
python | readthedocs__readthedocs.org | readthedocs/projects/tasks/search.py | {
"start": 4455,
"end": 13456
} | class ____(Indexer):
def __init__(self, version: Version, build: Build):
self.version = version
self.build = build
self._hashes = {}
def process(self, html_file: HTMLFile, sync_id: int):
self._hashes[html_file.path] = html_file.processed_json["main_content_hash"]
def collect(self, sync_id: int):
manifest = FileTreeDiffManifest(
build_id=self.build.id,
files=[
FileTreeDiffManifestFile(path=path, main_content_hash=hash)
for path, hash in self._hashes.items()
],
)
write_manifest(self.version, manifest)
if self.version.is_external and self.version.project.show_build_overview_in_comment:
post_build_overview.delay(self.build.id)
def _get_indexers(*, version: Version, build: Build, search_index_name=None):
build_config = build.config or {}
search_config = build_config.get("search", {})
search_ranking = search_config.get("ranking", {})
search_ignore = search_config.get("ignore", [])
indexers = []
# NOTE: The search indexer must be before the index file indexer.
# This is because saving the objects in the DB will give them an id,
# and we need this id to be `None` when indexing the objects in ES.
# ES will generate a unique id for each document.
# NOTE: We don't create a search indexer for:
# - External versions
# - Versions from projects with search indexing disabled
# - Versions from delisted projects
skip_search_indexing = (
not version.project.search_indexing_enabled
or version.is_external
or version.project.delisted
)
if not skip_search_indexing:
search_indexer = SearchIndexer(
project=version.project,
version=version,
search_ranking=search_ranking,
search_ignore=search_ignore,
search_index_name=search_index_name,
)
indexers.append(search_indexer)
# File tree diff is under a feature flag for now,
# and we only allow to compare PR previews against the latest version.
base_version = (
version.project.addons.options_base_version.slug
if version.project.addons.options_base_version
else LATEST
)
create_manifest = version.project.addons.filetreediff_enabled and (
version.is_external or version.slug == base_version or settings.RTD_FILETREEDIFF_ALL
)
if create_manifest:
file_manifest_indexer = FileManifestIndexer(
version=version,
build=build,
)
indexers.append(file_manifest_indexer)
index_file_indexer = IndexFileIndexer(
project=version.project,
version=version,
)
indexers.append(index_file_indexer)
return indexers
def _process_files(*, version: Version, indexers: list[Indexer]):
storage_path = version.project.get_storage_path(
type_="html",
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
# A sync ID is a number different than the current `build` attribute (pending rename),
# it's used to differentiate the files from the current sync from the previous one.
# This is useful to easily delete the previous files from the DB and ES.
# See https://github.com/readthedocs/readthedocs.org/issues/10734.
imported_file_build_id = version.imported_files.values_list("build", flat=True).first()
sync_id = imported_file_build_id + 1 if imported_file_build_id else 1
log.debug(
"Using sync ID for search indexing",
sync_id=sync_id,
)
for root, __, filenames in build_media_storage.walk(storage_path):
for filename in filenames:
# We don't care about non-HTML files (for now?).
if not filename.endswith(".html"):
continue
full_path = build_media_storage.join(root, filename)
# Generate a relative path for storage similar to os.path.relpath
relpath = full_path.removeprefix(storage_path).lstrip("/")
html_file = HTMLFile(
project=version.project,
version=version,
path=relpath,
name=filename,
# TODO: We are setting the commit field since it's required,
# but it isn't used, and will be removed in the future
# together with other fields.
commit="unknown",
build=sync_id,
)
for indexer in indexers:
try:
indexer.process(html_file, sync_id)
except Exception:
log.exception(
"Failed to process HTML file",
html_file=html_file.path,
indexer=indexer.__class__.__name__,
version_slug=version.slug,
)
for indexer in indexers:
try:
indexer.collect(sync_id)
except Exception:
log.exception(
"Failed to collect indexer results",
indexer=indexer.__class__.__name__,
version_slug=version.slug,
)
# This signal is used for purging the CDN.
files_changed.send(
sender=Project,
project=version.project,
version=version,
)
return sync_id
@app.task(queue="reindex")
def index_build(build_id):
"""Create imported files and search index for the build."""
build = Build.objects.filter(pk=build_id).select_related("version", "version__project").first()
if not build:
log.debug("Skipping search indexing. Build object doesn't exists.", build_id=build_id)
return
# The version may have been deleted.
version = build.version
if not version:
log.debug(
"Skipping search indexing. Build doesn't have a version attach it to it.",
build_id=build_id,
)
return
structlog.contextvars.bind_contextvars(
project_slug=version.project.slug,
version_slug=version.slug,
build_id=build.id,
)
try:
indexers = _get_indexers(
version=version,
build=build,
)
return _process_files(version=version, indexers=indexers)
except Exception:
log.exception("Failed to index build")
@app.task(queue="reindex")
def reindex_version(version_id, search_index_name=None):
"""
Re-create imported files and search index for the version.
The latest successful build is used for the re-creation.
"""
version = Version.objects.filter(pk=version_id).select_related("project").first()
if not version or not version.built:
log.debug(
"Skipping search indexing. Version doesn't exist or is not built.",
version_id=version_id,
)
return
latest_successful_build = (
version.builds.filter(state=BUILD_STATE_FINISHED, success=True).order_by("-date").first()
)
# If the version doesn't have a successful
# build, we don't have files to index.
if not latest_successful_build:
log.debug(
"Skipping search indexing. Version doesn't have a successful build.",
version_id=version_id,
)
return
structlog.contextvars.bind_contextvars(
project_slug=version.project.slug,
version_slug=version.slug,
build_id=latest_successful_build.id,
)
try:
indexers = _get_indexers(
version=version,
build=latest_successful_build,
search_index_name=search_index_name,
)
_process_files(version=version, indexers=indexers)
except Exception:
log.exception("Failed to re-index version")
@app.task(queue="reindex")
def index_project(project_slug, skip_if_exists=False):
"""
Index all active versions of the project.
If ``skip_if_exists`` is True, we first check if
the project has at least one version indexed,
and skip the re-indexing if it does.
"""
structlog.contextvars.bind_contextvars(project_slug=project_slug)
project = Project.objects.filter(slug=project_slug).first()
if not project:
log.debug("Project doesn't exist.")
return
if skip_if_exists:
if PageDocument().search().filter("term", project=project.slug).count():
log.debug("Skipping search indexing. Project is already indexed.")
return
versions = project.versions(manager=INTERNAL).filter(active=True, built=True)
for version in versions:
reindex_version(version_id=version.id)
@app.task(queue="web")
def remove_search_indexes(project_slug, version_slug=None):
"""Wrapper around ``remove_indexed_files`` to make it a task."""
remove_indexed_files(
project_slug=project_slug,
version_slug=version_slug,
)
| FileManifestIndexer |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/hoverlabel/_font.py | {
"start": 233,
"end": 17158
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo.hoverlabel"
_path_str = "scattergeo.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergeo.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 15274,
"end": 15422
} | class ____(_DropBase["Table"]):
def to_metadata(self, metadata: MetaData, table: Table) -> Self:
raise NotImplementedError()
| TableDropDDL |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1324564,
"end": 1324727
} | class ____(sgqlc.types.Type, ProjectV2FieldCommon, Node):
"""A field inside a project."""
__schema__ = github_schema
__field_names__ = ()
| ProjectV2Field |
python | getlogbook__logbook | benchmark/bench_noop_filter_on_handler.py | {
"start": 179,
"end": 490
} | class ____(StreamHandler):
def should_handle(self, record):
return False
def run():
out = StringIO()
with NullHandler():
with CustomStreamHandler(out):
for _ in range(500):
log.warning("this is not handled")
assert not out.getvalue()
| CustomStreamHandler |
python | spack__spack | lib/spack/spack/util/typing.py | {
"start": 206,
"end": 764
} | class ____(Protocol):
"""Objects that support =, !=, <, <=, >, and >=."""
def __eq__(self, other: Any) -> bool:
raise NotImplementedError
def __ne__(self, other: Any) -> bool:
raise NotImplementedError
def __lt__(self, other: Any) -> bool:
raise NotImplementedError
def __le__(self, other: Any) -> bool:
raise NotImplementedError
def __gt__(self, other: Any) -> bool:
raise NotImplementedError
def __ge__(self, other: Any) -> bool:
raise NotImplementedError
| SupportsRichComparison |
python | Textualize__textual | tests/toggles/test_radiobutton.py | {
"start": 121,
"end": 1955
} | class ____(App[None]):
def __init__(self):
super().__init__()
self.events_received = []
def compose(self) -> ComposeResult:
yield RadioButton("Test", id="rb1")
yield RadioButton(id="rb2")
yield RadioButton(value=True, id="rb3")
def on_radio_button_changed(self, event: RadioButton.Changed) -> None:
self.events_received.append(
(
event.radio_button.id,
event.radio_button.value,
event.radio_button == event.control,
)
)
async def test_radio_button_initial_state() -> None:
"""The initial states of the radio buttons should be as we specified."""
async with RadioButtonApp().run_test() as pilot:
assert [button.value for button in pilot.app.query(RadioButton)] == [
False,
False,
True,
]
assert [button.has_class("-on") for button in pilot.app.query(RadioButton)] == [
False,
False,
True,
]
assert pilot.app.events_received == []
async def test_radio_button_toggle() -> None:
"""Test the status of the radio buttons after they've been toggled."""
async with RadioButtonApp().run_test() as pilot:
for box in pilot.app.query(RadioButton):
box.toggle()
assert [button.value for button in pilot.app.query(RadioButton)] == [
True,
True,
False,
]
assert [button.has_class("-on") for button in pilot.app.query(RadioButton)] == [
True,
True,
False,
]
await pilot.pause()
assert pilot.app.events_received == [
("rb1", True, True),
("rb2", True, True),
("rb3", False, True),
]
| RadioButtonApp |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/importwizard.py | {
"start": 8798,
"end": 11704
} | class ____(QAbstractTableModel):
"""Import wizard preview table model"""
def __init__(self, data=None, parent=None):
QAbstractTableModel.__init__(self, parent)
data = [] if data is None else data
self._data = data
def rowCount(self, parent=QModelIndex()):
"""Return row count"""
return len(self._data)
def columnCount(self, parent=QModelIndex()):
"""Return column count"""
return len(self._data[0])
def _display_data(self, index):
"""Return a data element"""
return to_qvariant(self._data[index.row()][index.column()])
def data(self, index, role=Qt.DisplayRole):
"""Return a model data element"""
if not index.isValid():
return to_qvariant()
if role == Qt.DisplayRole:
return self._display_data(index)
elif role == Qt.BackgroundColorRole:
return to_qvariant(get_color(
self._data[index.row()][index.column()], 0.5))
elif role == Qt.TextAlignmentRole:
return to_qvariant(int(Qt.AlignRight|Qt.AlignVCenter))
return to_qvariant()
def setData(self, index, value, role=Qt.EditRole):
"""Set model data"""
return False
def get_data(self):
"""Return a copy of model data"""
return self._data[:][:]
def parse_data_type(self, index, **kwargs):
"""Parse a type to an other type"""
if not index.isValid():
return False
try:
if kwargs['atype'] == "date":
self._data[index.row()][index.column()] = \
datestr_to_datetime(self._data[index.row()][index.column()],
kwargs['dayfirst']).date()
elif kwargs['atype'] == "perc":
_tmp = self._data[index.row()][index.column()].replace("%", "")
self._data[index.row()][index.column()] = eval(_tmp)/100.
elif kwargs['atype'] == "account":
_tmp = self._data[index.row()][index.column()].replace(",", "")
self._data[index.row()][index.column()] = eval(_tmp)
elif kwargs['atype'] == "unicode":
self._data[index.row()][index.column()] = str(
self._data[index.row()][index.column()]
)
elif kwargs['atype'] == "int":
self._data[index.row()][index.column()] = int(
self._data[index.row()][index.column()])
elif kwargs['atype'] == "float":
self._data[index.row()][index.column()] = float(
self._data[index.row()][index.column()])
self.dataChanged.emit(index, index)
except Exception as instance:
print(instance) # spyder: test-skip
def reset(self):
self.beginResetModel()
self.endResetModel()
| PreviewTableModel |
python | graphql-python__graphene | graphene/utils/orderedtype.py | {
"start": 55,
"end": 1221
} | class ____:
creation_counter = 1
def __init__(self, _creation_counter=None):
self.creation_counter = _creation_counter or self.gen_counter()
@staticmethod
def gen_counter():
counter = OrderedType.creation_counter
OrderedType.creation_counter += 1
return counter
def reset_counter(self):
self.creation_counter = self.gen_counter()
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(self, type(other)):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, OrderedType):
return self.creation_counter < other.creation_counter
return NotImplemented
def __gt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, OrderedType):
return self.creation_counter > other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
| OrderedType |
python | getsentry__sentry | src/sentry/integrations/web/integration_extension_configuration.py | {
"start": 886,
"end": 1770
} | class ____(IntegrationPipeline):
def _dialog_success(self, _org_integration):
assert self.organization, "Organization must exist to get slug"
org_slug = self.organization.slug
assert isinstance(
self.provider, IntegrationProvider
), "Must be an IntegrationProvider to get integration key"
provider = self.provider.integration_key
integration_id = self.integration.id
# add in param string if we have a next page
param_string = ""
if "next" in self.request.GET:
param_string = "?%s" % urlencode({"next": self.request.GET["next"]})
redirect_uri = self.organization.absolute_url(
f"/settings/{org_slug}/integrations/{provider}/{integration_id}/",
query=param_string,
)
return HttpResponseRedirect(redirect_uri)
| ExternalIntegrationPipeline |
python | pikepdf__pikepdf | tests/test_object.py | {
"start": 7114,
"end": 8860
} | class ____:
def test_name_equality(self):
# Who needs transitivity? :P
# While this is less than ideal ('/Foo' != b'/Foo') it allows for slightly
# sloppy tests like if colorspace == '/Indexed' without requiring
# Name('/Indexed') everywhere
assert Name('/Foo') == '/Foo'
assert Name('/Foo') == b'/Foo'
assert Name.Foo == Name('/Foo')
def test_unslashed_name(self):
with pytest.raises(ValueError, match='must begin with'):
assert Name('Monty') not in [] # pylint: disable=expression-not-assigned
def test_empty_name(self):
with pytest.raises(ValueError):
Name('')
with pytest.raises(ValueError):
Name('/')
def test_forbidden_name_usage(self):
with pytest.raises(AttributeError, match="may not be set on pikepdf.Name"):
Name.Monty = Name.Python
with pytest.raises(TypeError, match="not subscriptable"):
Name['/Monty'] # pylint: disable=pointless-statement
if sys.implementation.name == 'pypy':
pytest.xfail(reason="pypy seems to do setattr differently")
with pytest.raises(AttributeError, match="has no attribute"):
monty = Name.Monty
monty.Attribute = 42
def test_bytes_of_name(self):
assert bytes(Name.ABC) == b'/ABC'
def test_name_from_name(self):
foo = Name('/Foo')
assert Name(foo) == foo
def test_name_bool(self):
assert bool(Name('/Foo')) is True
# Currently we forbid the empty name. All creatable names are true.
with pytest.raises(ValueError):
bool(Name('/'))
with pytest.raises(ValueError):
bool(Name(''))
| TestName |
python | astropy__astropy | astropy/coordinates/representation/base.py | {
"start": 1384,
"end": 4518
} | class ____(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Indicates unit is read-only
_supports_indexing = False
mask_val = np.ma.masked
@staticmethod
def default_format(val):
# Create numpy dtype so that numpy formatting will work.
components = val.components
values = tuple(getattr(val, component).value for component in components)
a = np.empty(
getattr(val, "shape", ()),
[(component, value.dtype) for component, value in zip(components, values)],
)
for component, value in zip(components, values):
a[component] = value
return str(a)
@property
def _represent_as_dict_attrs(self):
return self._parent.components
@property
def unit(self):
if self._parent is None:
return None
unit = self._parent._unitstr
return unit[1:-1] if unit.startswith("(") else unit
def new_like(self, reps, length, metadata_conflicts="warn", name=None):
"""
Return a new instance like ``reps`` with ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
reps : list
List of input representations or differentials.
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential` subclass instance
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
reps, metadata_conflicts, name, ("meta", "description")
)
# Make a new representation or differential with the desired length.
rep0 = reps[0]
out = rep0._apply(np.zeros_like, shape=(length,) + rep0.shape[1:])
# Use __setitem__ machinery to check whether all representations
# can represent themselves as this one without loss of information.
# We use :0 to ensure we do not break on empty coordinates (with the
# side benefit that we do not actually set anything).
for rep in reps[1:]:
try:
out[:0] = rep[:0]
except Exception as err:
raise ValueError("input representations are inconsistent.") from err
# Set (merged) info attributes.
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
| BaseRepresentationOrDifferentialInfo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 1887,
"end": 10297
} | class ____(HttpStream, ABC):
primary_key = "id"
# Detect streams with high API load
large_stream = False
max_retries: int = 5
stream_base_params = {}
def __init__(self, api_url: str = "https://api.github.com", access_token_type: str = "", **kwargs):
if kwargs.get("authenticator"):
kwargs["authenticator"].max_time = kwargs.pop("max_waiting_time", self.max_time)
super().__init__(**kwargs)
self.access_token_type = access_token_type
self.api_url = api_url
self.state = {}
if not self.supports_incremental:
self.cursor = SubstreamResumableFullRefreshCursor()
@property
def url_base(self) -> str:
return self.api_url
@property
def availability_strategy(self) -> Optional["AvailabilityStrategy"]:
return None
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
links = response.links
if "next" in links:
next_link = links["next"]["url"]
parsed_link = parse.urlparse(next_link)
page = dict(parse.parse_qsl(parsed_link.query)).get("page")
return {"page": page}
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = {"per_page": self.page_size}
if next_page_token:
params.update(next_page_token)
params.update(self.stream_base_params)
return params
def request_headers(self, **kwargs) -> Mapping[str, Any]:
# Without sending `User-Agent` header we will be getting `403 Client Error: Forbidden for url` error.
return {"User-Agent": "PostmanRuntime/7.28.0"}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
for record in response.json(): # GitHub puts records in an array.
yield self.transform(record=record, stream_slice=stream_slice)
def get_error_handler(self) -> Optional[ErrorHandler]:
return GithubStreamABCErrorHandler(
logger=self.logger, max_retries=self.max_retries, error_mapping=GITHUB_DEFAULT_ERROR_MAPPING, stream=self
)
def get_backoff_strategy(self) -> Optional[Union[BackoffStrategy, List[BackoffStrategy]]]:
return GithubStreamABCBackoffStrategy(stream=self)
@staticmethod
def check_graphql_rate_limited(response_json: dict) -> bool:
errors = response_json.get("errors")
if errors:
for error in errors:
if error.get("type") == "RATE_LIMITED":
return True
return False
def read_records(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
# get out the stream_slice parts for later use.
organisation = stream_slice.get("organization", "")
repository = stream_slice.get("repository", "")
# Reading records while handling the errors
try:
yield from super().read_records(stream_slice=stream_slice, **kwargs)
# HTTP Client wraps DefaultBackoffException into MessageRepresentationAirbyteTracedErrors
except MessageRepresentationAirbyteTracedErrors as e:
# This whole try/except situation in `read_records()` isn't good but right now in `self._send_request()`
# function we have `response.raise_for_status()` so we don't have much choice on how to handle errors.
# Blocked on https://github.com/airbytehq/airbyte/issues/3514.
if not hasattr(e, "_exception") and not hasattr(e._exception, "response"):
raise e
if e._exception.response.status_code == requests.codes.NOT_FOUND:
# A lot of streams are not available for repositories owned by a user instead of an organization.
if isinstance(self, Organizations):
error_msg = f"Syncing `{self.__class__.__name__}` stream isn't available for organization `{organisation}`."
elif isinstance(self, TeamMemberships):
error_msg = f"Syncing `{self.__class__.__name__}` stream for organization `{organisation}`, team `{stream_slice.get('team_slug')}` and user `{stream_slice.get('username')}` isn't available: User has no team membership. Skipping..."
else:
error_msg = f"Syncing `{self.__class__.__name__}` stream isn't available for repository `{repository}`."
elif e._exception.response.status_code == requests.codes.FORBIDDEN:
error_msg = str(e._exception.response.json().get("message"))
# When using the `check_connection` method, we should raise an error if we do not have access to the repository.
if isinstance(self, Repositories):
raise e
# When `403` for the stream, that has no access to the organization's teams, based on OAuth Apps Restrictions:
# https://docs.github.com/en/organizations/restricting-access-to-your-organizations-data/enabling-oauth-app-access-restrictions-for-your-organization
# For all `Organisation` based streams
elif isinstance(self, Organizations) or isinstance(self, Teams) or isinstance(self, Users):
error_msg = (
f"Syncing `{self.name}` stream isn't available for organization `{organisation}`. Full error message: {error_msg}"
)
# For all other `Repository` base streams
else:
error_msg = (
f"Syncing `{self.name}` stream isn't available for repository `{repository}`. Full error message: {error_msg}"
)
elif e._exception.response.status_code == requests.codes.UNAUTHORIZED:
if self.access_token_type == constants.PERSONAL_ACCESS_TOKEN_TITLE:
error_msg = str(e._exception.response.json().get("message"))
self.logger.error(f"{self.access_token_type} renewal is required: {error_msg}")
raise e
elif e._exception.response.status_code == requests.codes.GONE and isinstance(self, Projects):
# Some repos don't have projects enabled and we get "410 Client Error: Gone for
# url: https://api.github.com/repos/xyz/projects?per_page=100" error.
error_msg = f"Syncing `Projects` stream isn't available for repository `{stream_slice['repository']}`."
elif e._exception.response.status_code == requests.codes.CONFLICT:
error_msg = (
f"Syncing `{self.name}` stream isn't available for repository "
f"`{stream_slice['repository']}`, it seems like this repository is empty."
)
elif e._exception.response.status_code == requests.codes.SERVER_ERROR and isinstance(self, WorkflowRuns):
error_msg = f"Syncing `{self.name}` stream isn't available for repository `{stream_slice['repository']}`."
elif e._exception.response.status_code == requests.codes.BAD_GATEWAY:
error_msg = f"Stream {self.name} temporary failed. Try to re-run sync later"
else:
# most probably here we're facing a 500 server error and a risk to get a non-json response, so lets output response.text
self.logger.error(f"Undefined error while reading records: {e._exception.response.text}")
raise e
self.logger.warning(error_msg)
except GitHubAPILimitException as e:
internal_message = (
f"Stream: `{self.name}`, slice: `{stream_slice}`. Limits for all provided tokens are reached, please try again later"
)
message = "Rate Limits for all provided tokens are reached. For more information please refer to documentation: https://docs.airbyte.com/integrations/sources/github#limitations--troubleshooting"
raise AirbyteTracedException(internal_message=internal_message, message=message, failure_type=FailureType.config_error) from e
| GithubStreamABC |
python | kubernetes-client__python | kubernetes/client/models/v1_watch_event.py | {
"start": 383,
"end": 4740
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'object': 'object',
'type': 'str'
}
attribute_map = {
'object': 'object',
'type': 'type'
}
def __init__(self, object=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1WatchEvent - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._object = None
self._type = None
self.discriminator = None
self.object = object
self.type = type
@property
def object(self):
"""Gets the object of this V1WatchEvent. # noqa: E501
Object is: * If Type is Added or Modified: the new state of the object. * If Type is Deleted: the state of the object immediately before deletion. * If Type is Error: *Status is recommended; other types may make sense depending on context. # noqa: E501
:return: The object of this V1WatchEvent. # noqa: E501
:rtype: object
"""
return self._object
@object.setter
def object(self, object):
"""Sets the object of this V1WatchEvent.
Object is: * If Type is Added or Modified: the new state of the object. * If Type is Deleted: the state of the object immediately before deletion. * If Type is Error: *Status is recommended; other types may make sense depending on context. # noqa: E501
:param object: The object of this V1WatchEvent. # noqa: E501
:type: object
"""
if self.local_vars_configuration.client_side_validation and object is None: # noqa: E501
raise ValueError("Invalid value for `object`, must not be `None`") # noqa: E501
self._object = object
@property
def type(self):
"""Gets the type of this V1WatchEvent. # noqa: E501
:return: The type of this V1WatchEvent. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1WatchEvent.
:param type: The type of this V1WatchEvent. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1WatchEvent):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1WatchEvent):
return True
return self.to_dict() != other.to_dict()
| V1WatchEvent |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | {
"start": 3352,
"end": 4028
} | class ____(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def name_scope_only_in_function_or_graph(name):
"""Internal-only entry point for `name_scope*`.
Enters a compat.v1.name_scope only when in a function or graph,
not when running fully eagerly.
Args:
name: The name argument that is passed to the op function.
Returns:
`name_scope*` context manager.
"""
if not context.executing_eagerly():
return ops.name_scope_v1(name)
else:
return NullContextmanager()
| NullContextmanager |
python | ansible__ansible | lib/ansible/modules/service_facts.py | {
"start": 14155,
"end": 15438
} | class ____(BaseService):
def gather_services(self):
services = {}
if platform.system() == 'AIX':
lssrc_path = self.module.get_bin_path("lssrc")
if lssrc_path:
rc, stdout, stderr = self.module.run_command("%s -a" % lssrc_path)
if rc != 0:
self.module.warn("lssrc could not retrieve service data (%s): %s" % (rc, stderr))
else:
for line in stdout.split('\n'):
line_data = line.split()
if len(line_data) < 2:
continue # Skipping because we expected more data
if line_data[0] == "Subsystem":
continue # Skip header
service_name = line_data[0]
if line_data[-1] == "active":
service_state = "running"
elif line_data[-1] == "inoperative":
service_state = "stopped"
else:
service_state = "unknown"
services[service_name] = {"name": service_name, "state": service_state, "source": "src"}
return services
| AIXScanService |
python | getsentry__sentry | tests/sentry/integrations/source_code_management/test_commit_context.py | {
"start": 1133,
"end": 8952
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration = MockCommitContextIntegration()
self.repo = Repository.objects.create(
organization_id=self.organization.id,
name="example/repo",
)
self.source_line = SourceLineInfo(
lineno=10, path="src/file.py", ref="main", repo=self.repo, code_mapping=Mock()
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_success(self, mock_record: MagicMock) -> None:
"""Test successful blame retrieval records correct lifecycle events"""
self.integration.client.get_blame_for_files.return_value = []
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_missing_identity(self, mock_record: MagicMock) -> None:
"""Test missing identity records failure"""
self.integration.get_client = Mock(side_effect=Identity.DoesNotExist())
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert len(mock_record.mock_calls) == 2
assert_slo_metric(mock_record, EventLifecycleOutcome.FAILURE)
assert_failure_metric(mock_record, Identity.DoesNotExist())
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_invalid_identity(self, mock_record: MagicMock) -> None:
"""Test invalid identity records failure"""
from sentry.auth.exceptions import IdentityNotValid
self.integration.client.get_blame_for_files = Mock(side_effect=IdentityNotValid())
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert_slo_metric(mock_record, EventLifecycleOutcome.FAILURE)
assert_failure_metric(mock_record, IdentityNotValid())
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_rate_limited(self, mock_record: MagicMock) -> None:
"""Test rate limited requests record halt"""
from sentry.shared_integrations.exceptions import ApiRateLimitedError
self.integration.client.get_blame_for_files = Mock(
side_effect=ApiRateLimitedError(text="Rate limited")
)
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
assert_halt_metric(mock_record, ApiRateLimitedError(text="Rate limited"))
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_invalid_request(self, mock_record: MagicMock) -> None:
"""Test invalid request records halt"""
from sentry.shared_integrations.exceptions import ApiInvalidRequestError
self.integration.client.get_blame_for_files = Mock(
side_effect=ApiInvalidRequestError(text="Invalid request")
)
self.integration.get_blame_for_files([self.source_line], {})
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
assert_halt_metric(mock_record, ApiInvalidRequestError(text="Invalid request"))
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_invalid_request_gitlab(self, mock_record: MagicMock) -> None:
"""Test invalid request for GitLab records halt"""
from sentry.shared_integrations.exceptions import ApiInvalidRequestError
class MockGitlabIntegration(MockCommitContextIntegration):
integration_name = "gitlab"
self.integration = MockGitlabIntegration()
self.integration.client.get_blame_for_files = Mock(
side_effect=ApiInvalidRequestError(text="Invalid request")
)
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
assert_halt_metric(mock_record, ApiInvalidRequestError(text="Invalid request"))
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_api_host_error_gitlab(self, mock_record: MagicMock) -> None:
class MockGitlabIntegration(MockCommitContextIntegration):
integration_name = "gitlab"
self.integration = MockGitlabIntegration()
self.integration.client.get_blame_for_files = Mock(
side_effect=ApiHostError(text="retried too many times")
)
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
assert_halt_metric(mock_record, ApiHostError(text="retried too many times"))
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_retry_error(self, mock_record: MagicMock) -> None:
"""Test retry error for Gitlab Self-hosted records halt"""
from sentry.shared_integrations.exceptions import ApiRetryError
# Because this is Gitlab Self-hosted, this should be halt
class MockGitlabIntegration(MockCommitContextIntegration):
integration_name = "gitlab"
base_url = "https://bufo-bot.gitlab.com"
def __init__(self) -> None:
super().__init__()
self.client.base_url = self.base_url
self.integration = MockGitlabIntegration()
self.integration.client.get_blame_for_files = Mock(
side_effect=ApiRetryError(text="Host error")
)
result = self.integration.get_blame_for_files([self.source_line], {})
assert result == []
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
assert_halt_metric(mock_record, ApiRetryError(text="Host error"))
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_blame_for_files_retry_error_gitlab(self, mock_record: MagicMock) -> None:
"""Test retry error for GitLab saas records failure"""
from sentry.shared_integrations.exceptions import ApiRetryError
# Because this is Gitlab SAAS, this should be failure
class MockGitlabIntegration(MockCommitContextIntegration):
integration_name = "gitlab"
base_url = GITLAB_CLOUD_BASE_URL
def __init__(self) -> None:
super().__init__()
self.client.base_url = self.base_url
self.integration = MockGitlabIntegration()
self.integration.client.get_blame_for_files = Mock(
side_effect=ApiRetryError(text="Host error")
)
with pytest.raises(ApiRetryError):
self.integration.get_blame_for_files([self.source_line], {})
assert_slo_metric(mock_record, EventLifecycleOutcome.FAILURE)
assert_failure_metric(mock_record, ApiRetryError(text="Host error"))
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_commit_context_all_frames(self, mock_record: MagicMock) -> None:
"""Test get_commit_context_all_frames records correct lifecycle events"""
self.integration.client.get_blame_for_files.return_value = []
result = self.integration.get_commit_context_all_frames([self.source_line], {})
assert result == []
assert len(mock_record.mock_calls) == 2
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
| TestCommitContextIntegrationSLO |