language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_optimize03.py | {
"start": 315,
"end": 2699
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
worksheet.set_column("A:A", 36, bold)
worksheet.set_column("B:B", 20)
worksheet.set_row(0, 39.75)
heading_format = workbook.add_format(
{
"bold": 1,
"font_color": "blue",
"font_size": 16,
"align": "centre_across",
"valign": "vcenter",
}
)
heading_format.text_h_align = 6
hyperlink_format = workbook.add_format(
{
"font_color": "blue",
"underline": 1,
}
)
headings = ["Features of Excel::Writer::XLSX", ""]
worksheet.write_row("A1", headings, heading_format)
text_format = workbook.add_format(
{
"bold": 1,
"italic": 1,
"font_color": "red",
"font_size": 18,
"font_name": "Lucida Calligraphy",
}
)
worksheet.write("A2", "Text")
worksheet.write("B2", "Hello Excel")
worksheet.write("A3", "Formatted text")
worksheet.write("B3", "Hello Excel", text_format)
num1_format = workbook.add_format({"num_format": "$#,##0.00"})
num2_format = workbook.add_format({"num_format": " d mmmm yyy"})
worksheet.write("A5", "Numbers")
worksheet.write("B5", 1234.56)
worksheet.write("A6", "Formatted numbers")
worksheet.write("B6", 1234.56, num1_format)
worksheet.write("A7", "Formatted numbers")
worksheet.write("B7", 37257, num2_format)
worksheet.write("A8", 'Formulas and functions, "=SIN(PI()/4)"')
worksheet.write("B8", "=SIN(PI()/4)")
worksheet.write("A9", "Hyperlinks")
worksheet.write("B9", "http://www.perl.com/", hyperlink_format)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py | {
"start": 2258,
"end": 7086
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: GPTNeoXJapaneseConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[GPTNeoXJapaneseConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
| GPTNeoXJapaneseRotaryEmbedding |
python | sympy__sympy | sympy/stats/joint_rv_types.py | {
"start": 15105,
"end": 17209
} | class ____(JointDistribution):
_argnames = ('alpha',)
is_Continuous = True
@staticmethod
def check(alpha):
_value_check(len(alpha) >= 2, "At least two categories should be passed.")
for a_k in alpha:
_value_check((a_k > 0) != False, "Each concentration parameter"
" should be positive.")
@property
def set(self):
k = len(self.alpha)
return Interval(0, 1)**k
def pdf(self, *syms):
alpha = self.alpha
B = Mul.fromiter(map(gamma, alpha))/gamma(Add(*alpha))
return Mul.fromiter(sym**(a_k - 1) for a_k, sym in zip(alpha, syms))/B
def MultivariateBeta(syms, *alpha):
"""
Creates a continuous random variable with Dirichlet/Multivariate Beta
Distribution.
The density of the Dirichlet distribution can be found at [1].
Parameters
==========
alpha : Positive real numbers
Signifies concentration numbers.
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, MultivariateBeta, marginal_distribution
>>> from sympy import Symbol
>>> a1 = Symbol('a1', positive=True)
>>> a2 = Symbol('a2', positive=True)
>>> B = MultivariateBeta('B', [a1, a2])
>>> C = MultivariateBeta('C', a1, a2)
>>> x = Symbol('x')
>>> y = Symbol('y')
>>> density(B)(x, y)
x**(a1 - 1)*y**(a2 - 1)*gamma(a1 + a2)/(gamma(a1)*gamma(a2))
>>> marginal_distribution(C, C[0])(x)
x**(a1 - 1)*gamma(a1 + a2)/(a2*gamma(a1)*gamma(a2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Dirichlet_distribution
.. [2] https://mathworld.wolfram.com/DirichletDistribution.html
"""
if not isinstance(alpha[0], list):
alpha = (list(alpha),)
return multivariate_rv(MultivariateBetaDistribution, syms, alpha[0])
Dirichlet = MultivariateBeta
#-------------------------------------------------------------------------------
# Multivariate Ewens distribution ----------------------------------------------
| MultivariateBetaDistribution |
python | sympy__sympy | sympy/parsing/autolev/_antlr/autolevparser.py | {
"start": 12328,
"end": 106291
} | class ____ ( Parser ):
grammarFileName = "Autolev.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'['", "']'", "'='", "'+='", "'-='", "':='",
"'*='", "'/='", "'^='", "','", "'''", "'('", "')'",
"'{'", "'}'", "':'", "'+'", "'-'", "';'", "'.'", "'>'",
"'0>'", "'1>>'", "'^'", "'*'", "'/'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "Mass", "Inertia",
"Input", "Output", "Save", "UnitSystem", "Encode",
"Newtonian", "Frames", "Bodies", "Particles", "Points",
"Constants", "Specifieds", "Imaginary", "Variables",
"MotionVariables", "INT", "FLOAT", "EXP", "LINE_COMMENT",
"ID", "WS" ]
RULE_prog = 0
RULE_stat = 1
RULE_assignment = 2
RULE_equals = 3
RULE_index = 4
RULE_diff = 5
RULE_functionCall = 6
RULE_varDecl = 7
RULE_varType = 8
RULE_varDecl2 = 9
RULE_ranges = 10
RULE_massDecl = 11
RULE_massDecl2 = 12
RULE_inertiaDecl = 13
RULE_matrix = 14
RULE_matrixInOutput = 15
RULE_codeCommands = 16
RULE_settings = 17
RULE_units = 18
RULE_inputs = 19
RULE_id_diff = 20
RULE_inputs2 = 21
RULE_outputs = 22
RULE_outputs2 = 23
RULE_codegen = 24
RULE_commands = 25
RULE_vec = 26
RULE_expr = 27
ruleNames = [ "prog", "stat", "assignment", "equals", "index", "diff",
"functionCall", "varDecl", "varType", "varDecl2", "ranges",
"massDecl", "massDecl2", "inertiaDecl", "matrix", "matrixInOutput",
"codeCommands", "settings", "units", "inputs", "id_diff",
"inputs2", "outputs", "outputs2", "codegen", "commands",
"vec", "expr" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
T__16=17
T__17=18
T__18=19
T__19=20
T__20=21
T__21=22
T__22=23
T__23=24
T__24=25
T__25=26
Mass=27
Inertia=28
Input=29
Output=30
Save=31
UnitSystem=32
Encode=33
Newtonian=34
Frames=35
Bodies=36
Particles=37
Points=38
Constants=39
Specifieds=40
Imaginary=41
Variables=42
MotionVariables=43
INT=44
FLOAT=45
EXP=46
LINE_COMMENT=47
ID=48
WS=49
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.11.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def stat(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutolevParser.StatContext)
else:
return self.getTypedRuleContext(AutolevParser.StatContext,i)
def getRuleIndex(self):
return AutolevParser.RULE_prog
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProg" ):
listener.enterProg(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProg" ):
listener.exitProg(self)
def prog(self):
localctx = AutolevParser.ProgContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_prog)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 57
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 56
self.stat()
self.state = 59
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (((_la) & ~0x3f) == 0 and ((1 << _la) & 299067041120256) != 0):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def varDecl(self):
return self.getTypedRuleContext(AutolevParser.VarDeclContext,0)
def functionCall(self):
return self.getTypedRuleContext(AutolevParser.FunctionCallContext,0)
def codeCommands(self):
return self.getTypedRuleContext(AutolevParser.CodeCommandsContext,0)
def massDecl(self):
return self.getTypedRuleContext(AutolevParser.MassDeclContext,0)
def inertiaDecl(self):
return self.getTypedRuleContext(AutolevParser.InertiaDeclContext,0)
def assignment(self):
return self.getTypedRuleContext(AutolevParser.AssignmentContext,0)
def settings(self):
return self.getTypedRuleContext(AutolevParser.SettingsContext,0)
def getRuleIndex(self):
return AutolevParser.RULE_stat
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStat" ):
listener.enterStat(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStat" ):
listener.exitStat(self)
def stat(self):
localctx = AutolevParser.StatContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_stat)
try:
self.state = 68
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,1,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 61
self.varDecl()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 62
self.functionCall()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 63
self.codeCommands()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 64
self.massDecl()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 65
self.inertiaDecl()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 66
self.assignment()
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 67
self.settings()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return AutolevParser.RULE_assignment
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class VecAssignContext(AssignmentContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.AssignmentContext
super().__init__(parser)
self.copyFrom(ctx)
def vec(self):
return self.getTypedRuleContext(AutolevParser.VecContext,0)
def equals(self):
return self.getTypedRuleContext(AutolevParser.EqualsContext,0)
def expr(self):
return self.getTypedRuleContext(AutolevParser.ExprContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVecAssign" ):
listener.enterVecAssign(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVecAssign" ):
listener.exitVecAssign(self)
class RegularAssignContext(AssignmentContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.AssignmentContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(AutolevParser.ID, 0)
def equals(self):
return self.getTypedRuleContext(AutolevParser.EqualsContext,0)
def expr(self):
return self.getTypedRuleContext(AutolevParser.ExprContext,0)
def diff(self):
return self.getTypedRuleContext(AutolevParser.DiffContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRegularAssign" ):
listener.enterRegularAssign(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRegularAssign" ):
listener.exitRegularAssign(self)
class IndexAssignContext(AssignmentContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.AssignmentContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(AutolevParser.ID, 0)
def index(self):
return self.getTypedRuleContext(AutolevParser.IndexContext,0)
def equals(self):
return self.getTypedRuleContext(AutolevParser.EqualsContext,0)
def expr(self):
return self.getTypedRuleContext(AutolevParser.ExprContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIndexAssign" ):
listener.enterIndexAssign(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIndexAssign" ):
listener.exitIndexAssign(self)
def assignment(self):
localctx = AutolevParser.AssignmentContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_assignment)
self._la = 0 # Token type
try:
self.state = 88
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
if la_ == 1:
localctx = AutolevParser.VecAssignContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 70
self.vec()
self.state = 71
self.equals()
self.state = 72
self.expr(0)
pass
elif la_ == 2:
localctx = AutolevParser.IndexAssignContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 74
self.match(AutolevParser.ID)
self.state = 75
self.match(AutolevParser.T__0)
self.state = 76
self.index()
self.state = 77
self.match(AutolevParser.T__1)
self.state = 78
self.equals()
self.state = 79
self.expr(0)
pass
elif la_ == 3:
localctx = AutolevParser.RegularAssignContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 81
self.match(AutolevParser.ID)
self.state = 83
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==11:
self.state = 82
self.diff()
self.state = 85
self.equals()
self.state = 86
self.expr(0)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EqualsContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return AutolevParser.RULE_equals
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEquals" ):
listener.enterEquals(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEquals" ):
listener.exitEquals(self)
def equals(self):
localctx = AutolevParser.EqualsContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_equals)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 90
_la = self._input.LA(1)
if not(((_la) & ~0x3f) == 0 and ((1 << _la) & 1016) != 0):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IndexContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutolevParser.ExprContext)
else:
return self.getTypedRuleContext(AutolevParser.ExprContext,i)
def getRuleIndex(self):
return AutolevParser.RULE_index
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIndex" ):
listener.enterIndex(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIndex" ):
listener.exitIndex(self)
def index(self):
localctx = AutolevParser.IndexContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_index)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 92
self.expr(0)
self.state = 97
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==10:
self.state = 93
self.match(AutolevParser.T__9)
self.state = 94
self.expr(0)
self.state = 99
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DiffContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return AutolevParser.RULE_diff
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDiff" ):
listener.enterDiff(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDiff" ):
listener.exitDiff(self)
def diff(self):
localctx = AutolevParser.DiffContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_diff)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 101
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 100
self.match(AutolevParser.T__10)
self.state = 103
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==11):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionCallContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self, i:int=None):
if i is None:
return self.getTokens(AutolevParser.ID)
else:
return self.getToken(AutolevParser.ID, i)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutolevParser.ExprContext)
else:
return self.getTypedRuleContext(AutolevParser.ExprContext,i)
def Mass(self):
return self.getToken(AutolevParser.Mass, 0)
def Inertia(self):
return self.getToken(AutolevParser.Inertia, 0)
def getRuleIndex(self):
return AutolevParser.RULE_functionCall
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFunctionCall" ):
listener.enterFunctionCall(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFunctionCall" ):
listener.exitFunctionCall(self)
def functionCall(self):
localctx = AutolevParser.FunctionCallContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_functionCall)
self._la = 0 # Token type
try:
self.state = 131
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [48]:
self.enterOuterAlt(localctx, 1)
self.state = 105
self.match(AutolevParser.ID)
self.state = 106
self.match(AutolevParser.T__11)
self.state = 115
self._errHandler.sync(self)
_la = self._input.LA(1)
if ((_la) & ~0x3f) == 0 and ((1 << _la) & 404620694540290) != 0:
self.state = 107
self.expr(0)
self.state = 112
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==10:
self.state = 108
self.match(AutolevParser.T__9)
self.state = 109
self.expr(0)
self.state = 114
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 117
self.match(AutolevParser.T__12)
pass
elif token in [27, 28]:
self.enterOuterAlt(localctx, 2)
self.state = 118
_la = self._input.LA(1)
if not(_la==27 or _la==28):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 119
self.match(AutolevParser.T__11)
self.state = 128
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==48:
self.state = 120
self.match(AutolevParser.ID)
self.state = 125
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==10:
self.state = 121
self.match(AutolevParser.T__9)
self.state = 122
self.match(AutolevParser.ID)
self.state = 127
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 130
self.match(AutolevParser.T__12)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VarDeclContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def varType(self):
return self.getTypedRuleContext(AutolevParser.VarTypeContext,0)
def varDecl2(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutolevParser.VarDecl2Context)
else:
return self.getTypedRuleContext(AutolevParser.VarDecl2Context,i)
def getRuleIndex(self):
return AutolevParser.RULE_varDecl
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVarDecl" ):
listener.enterVarDecl(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVarDecl" ):
listener.exitVarDecl(self)
def varDecl(self):
localctx = AutolevParser.VarDeclContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_varDecl)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 133
self.varType()
self.state = 134
self.varDecl2()
self.state = 139
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==10:
self.state = 135
self.match(AutolevParser.T__9)
self.state = 136
self.varDecl2()
self.state = 141
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VarTypeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Newtonian(self):
return self.getToken(AutolevParser.Newtonian, 0)
def Frames(self):
return self.getToken(AutolevParser.Frames, 0)
def Bodies(self):
return self.getToken(AutolevParser.Bodies, 0)
def Particles(self):
return self.getToken(AutolevParser.Particles, 0)
def Points(self):
return self.getToken(AutolevParser.Points, 0)
def Constants(self):
return self.getToken(AutolevParser.Constants, 0)
def Specifieds(self):
return self.getToken(AutolevParser.Specifieds, 0)
def Imaginary(self):
return self.getToken(AutolevParser.Imaginary, 0)
def Variables(self):
return self.getToken(AutolevParser.Variables, 0)
def MotionVariables(self):
return self.getToken(AutolevParser.MotionVariables, 0)
def getRuleIndex(self):
return AutolevParser.RULE_varType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVarType" ):
listener.enterVarType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVarType" ):
listener.exitVarType(self)
def varType(self):
localctx = AutolevParser.VarTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_varType)
self._la = 0 # Token type
try:
self.state = 164
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [34]:
self.enterOuterAlt(localctx, 1)
self.state = 142
self.match(AutolevParser.Newtonian)
pass
elif token in [35]:
self.enterOuterAlt(localctx, 2)
self.state = 143
self.match(AutolevParser.Frames)
pass
elif token in [36]:
self.enterOuterAlt(localctx, 3)
self.state = 144
self.match(AutolevParser.Bodies)
pass
elif token in [37]:
self.enterOuterAlt(localctx, 4)
self.state = 145
self.match(AutolevParser.Particles)
pass
elif token in [38]:
self.enterOuterAlt(localctx, 5)
self.state = 146
self.match(AutolevParser.Points)
pass
elif token in [39]:
self.enterOuterAlt(localctx, 6)
self.state = 147
self.match(AutolevParser.Constants)
pass
elif token in [40]:
self.enterOuterAlt(localctx, 7)
self.state = 148
self.match(AutolevParser.Specifieds)
pass
elif token in [41]:
self.enterOuterAlt(localctx, 8)
self.state = 149
self.match(AutolevParser.Imaginary)
pass
elif token in [42]:
self.enterOuterAlt(localctx, 9)
self.state = 150
self.match(AutolevParser.Variables)
self.state = 154
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==11:
self.state = 151
self.match(AutolevParser.T__10)
self.state = 156
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
elif token in [43]:
self.enterOuterAlt(localctx, 10)
self.state = 157
self.match(AutolevParser.MotionVariables)
self.state = 161
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==11:
self.state = 158
self.match(AutolevParser.T__10)
self.state = 163
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VarDecl2Context(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(AutolevParser.ID, 0)
def INT(self, i:int=None):
if i is None:
return self.getTokens(AutolevParser.INT)
else:
return self.getToken(AutolevParser.INT, i)
def expr(self):
return self.getTypedRuleContext(AutolevParser.ExprContext,0)
def getRuleIndex(self):
return AutolevParser.RULE_varDecl2
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVarDecl2" ):
listener.enterVarDecl2(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVarDecl2" ):
listener.exitVarDecl2(self)
def varDecl2(self):
localctx = AutolevParser.VarDecl2Context(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_varDecl2)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 166
self.match(AutolevParser.ID)
self.state = 172
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,15,self._ctx)
if la_ == 1:
self.state = 167
self.match(AutolevParser.T__13)
self.state = 168
self.match(AutolevParser.INT)
self.state = 169
self.match(AutolevParser.T__9)
self.state = 170
self.match(AutolevParser.INT)
self.state = 171
self.match(AutolevParser.T__14)
self.state = 188
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,17,self._ctx)
if la_ == 1:
self.state = 174
self.match(AutolevParser.T__13)
self.state = 175
self.match(AutolevParser.INT)
self.state = 176
self.match(AutolevParser.T__15)
self.state = 177
self.match(AutolevParser.INT)
self.state = 184
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==10:
self.state = 178
self.match(AutolevParser.T__9)
self.state = 179
self.match(AutolevParser.INT)
self.state = 180
self.match(AutolevParser.T__15)
self.state = 181
self.match(AutolevParser.INT)
self.state = 186
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 187
self.match(AutolevParser.T__14)
self.state = 193
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==14:
self.state = 190
self.match(AutolevParser.T__13)
self.state = 191
self.match(AutolevParser.INT)
self.state = 192
self.match(AutolevParser.T__14)
self.state = 196
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==17 or _la==18:
self.state = 195
_la = self._input.LA(1)
if not(_la==17 or _la==18):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 201
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==11:
self.state = 198
self.match(AutolevParser.T__10)
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 206
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==3:
self.state = 204
self.match(AutolevParser.T__2)
self.state = 205
self.expr(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RangesContext(ParserRuleContext):
    """Parse-tree node for the ``ranges`` rule (a list of INT:INT spans)."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INT(self, i:int=None):
        # No index -> every INT token in this context; otherwise the i-th.
        return self.getTokens(AutolevParser.INT) if i is None else self.getToken(AutolevParser.INT, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_ranges

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterRanges"):
            listener.enterRanges(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitRanges"):
            listener.exitRanges(self)
def ranges(self):
    """Parse the ``ranges`` rule: a delimited list ``INT : INT (, INT : INT)*``.

    Returns the populated :class:`RangesContext`; on a syntax error the
    RecognitionException is recorded on ``localctx.exception`` and standard
    ANTLR recovery is performed.  NOTE: generated by ANTLR — the hard-coded
    ``self.state`` numbers index the serialized ATN; do not edit by hand.
    """
    localctx = AutolevParser.RangesContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_ranges)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 208
        self.match(AutolevParser.T__13)
        self.state = 209
        self.match(AutolevParser.INT)
        self.state = 210
        self.match(AutolevParser.T__15)
        self.state = 211
        self.match(AutolevParser.INT)
        self.state = 218
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more additional ``, INT : INT`` pairs (token type 10 is the separator).
        while _la==10:
            self.state = 212
            self.match(AutolevParser.T__9)
            self.state = 213
            self.match(AutolevParser.INT)
            self.state = 214
            self.match(AutolevParser.T__15)
            self.state = 215
            self.match(AutolevParser.INT)
            self.state = 220
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 221
        self.match(AutolevParser.T__14)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class MassDeclContext(ParserRuleContext):
    """Parse-tree node for a ``Mass`` declaration statement."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def Mass(self):
        return self.getToken(AutolevParser.Mass, 0)

    def massDecl2(self, i:int=None):
        # No index -> all massDecl2 children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.MassDecl2Context)
        return self.getTypedRuleContext(AutolevParser.MassDecl2Context, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_massDecl

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterMassDecl"):
            listener.enterMassDecl(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitMassDecl"):
            listener.exitMassDecl(self)
def massDecl(self):
    """Parse ``massDecl``: the ``Mass`` keyword followed by one or more
    comma-separated ``massDecl2`` assignments.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.MassDeclContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_massDecl)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 223
        self.match(AutolevParser.Mass)
        self.state = 224
        self.massDecl2()
        self.state = 229
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # ( ',' massDecl2 )* — token type 10 is the separator.
        while _la==10:
            self.state = 225
            self.match(AutolevParser.T__9)
            self.state = 226
            self.massDecl2()
            self.state = 231
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class MassDecl2Context(ParserRuleContext):
    """Parse-tree node for a single ``ID = expr`` mass assignment."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ID(self):
        return self.getToken(AutolevParser.ID, 0)

    def expr(self):
        return self.getTypedRuleContext(AutolevParser.ExprContext, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_massDecl2

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterMassDecl2"):
            listener.enterMassDecl2(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitMassDecl2"):
            listener.exitMassDecl2(self)
def massDecl2(self):
    """Parse ``massDecl2``: a single ``ID = expr`` assignment.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.MassDecl2Context(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_massDecl2)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 232
        self.match(AutolevParser.ID)
        self.state = 233
        self.match(AutolevParser.T__2)
        self.state = 234
        self.expr(0)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class InertiaDeclContext(ParserRuleContext):
    """Parse-tree node for an ``Inertia`` declaration statement."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def Inertia(self):
        return self.getToken(AutolevParser.Inertia, 0)

    def ID(self, i:int=None):
        # No index -> every ID token; otherwise the i-th.
        return self.getTokens(AutolevParser.ID) if i is None else self.getToken(AutolevParser.ID, i)

    def expr(self, i:int=None):
        # No index -> all expr children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_inertiaDecl

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterInertiaDecl"):
            listener.enterInertiaDecl(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitInertiaDecl"):
            listener.exitInertiaDecl(self)
def inertiaDecl(self):
    """Parse ``inertiaDecl``: ``Inertia ID`` with an optional parenthesised
    frame ID, followed by one or more ``, expr`` entries (a do/while loop —
    at least one comma-prefixed expression is required).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.InertiaDeclContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_inertiaDecl)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 236
        self.match(AutolevParser.Inertia)
        self.state = 237
        self.match(AutolevParser.ID)
        self.state = 241
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional bracketed body ID (token type 12 opens it).
        if _la==12:
            self.state = 238
            self.match(AutolevParser.T__11)
            self.state = 239
            self.match(AutolevParser.ID)
            self.state = 240
            self.match(AutolevParser.T__12)
        self.state = 245
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # ( ',' expr )+ — executed at least once; loops while the next
        # token is the separator (type 10).
        while True:
            self.state = 243
            self.match(AutolevParser.T__9)
            self.state = 244
            self.expr(0)
            self.state = 247
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if not (_la==10):
                break
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class MatrixContext(ParserRuleContext):
    """Parse-tree node for a bracketed matrix literal."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def expr(self, i:int=None):
        # No index -> all element expressions; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_matrix

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterMatrix"):
            listener.enterMatrix(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitMatrix"):
            listener.exitMatrix(self)
def matrix(self):
    """Parse ``matrix``: an expression list delimited by T__0/T__1, with
    elements separated by either of two separator tokens (types 10 and 19).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.MatrixContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_matrix)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 249
        self.match(AutolevParser.T__0)
        self.state = 250
        self.expr(0)
        self.state = 255
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # ( (sep) expr )* — the separator is a token *set* {10, 19}, matched
        # via the inline set-match idiom below rather than a plain match().
        while _la==10 or _la==19:
            self.state = 251
            _la = self._input.LA(1)
            if not(_la==10 or _la==19):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 252
            self.expr(0)
            self.state = 257
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 258
        self.match(AutolevParser.T__1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class MatrixInOutputContext(ParserRuleContext):
    """Parse-tree node for one entry of a codegen input/output matrix list."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ID(self, i:int=None):
        # No index -> every ID token; otherwise the i-th.
        return self.getTokens(AutolevParser.ID) if i is None else self.getToken(AutolevParser.ID, i)

    def FLOAT(self):
        return self.getToken(AutolevParser.FLOAT, 0)

    def INT(self):
        return self.getToken(AutolevParser.INT, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_matrixInOutput

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterMatrixInOutput"):
            listener.enterMatrixInOutput(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitMatrixInOutput"):
            listener.exitMatrixInOutput(self)
def matrixInOutput(self):
    """Parse ``matrixInOutput``: one of three alternatives selected on the
    lookahead token — ``ID ID = (INT|FLOAT)?`` (token 48 = ID), a bare
    FLOAT (token 45), or a bare INT (token 44).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.MatrixInOutputContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_matrixInOutput)
    self._la = 0 # Token type
    try:
        self.state = 268
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [48]:
            # Alternative 1: ID ID '=' with an optional INT/FLOAT value.
            self.enterOuterAlt(localctx, 1)
            self.state = 260
            self.match(AutolevParser.ID)
            self.state = 261
            self.match(AutolevParser.ID)
            self.state = 262
            self.match(AutolevParser.T__2)
            self.state = 264
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional value drawn from the token set {44 (INT), 45 (FLOAT)},
            # matched with the inline set-match idiom.
            if _la==44 or _la==45:
                self.state = 263
                _la = self._input.LA(1)
                if not(_la==44 or _la==45):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            pass
        elif token in [45]:
            # Alternative 2: a lone FLOAT.
            self.enterOuterAlt(localctx, 2)
            self.state = 266
            self.match(AutolevParser.FLOAT)
            pass
        elif token in [44]:
            # Alternative 3: a lone INT.
            self.enterOuterAlt(localctx, 3)
            self.state = 267
            self.match(AutolevParser.INT)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CodeCommandsContext(ParserRuleContext):
    """Parse-tree node for a code-generation command statement."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def units(self):
        return self.getTypedRuleContext(AutolevParser.UnitsContext, 0)

    def inputs(self):
        return self.getTypedRuleContext(AutolevParser.InputsContext, 0)

    def outputs(self):
        return self.getTypedRuleContext(AutolevParser.OutputsContext, 0)

    def codegen(self):
        return self.getTypedRuleContext(AutolevParser.CodegenContext, 0)

    def commands(self):
        return self.getTypedRuleContext(AutolevParser.CommandsContext, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_codeCommands

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterCodeCommands"):
            listener.enterCodeCommands(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitCodeCommands"):
            listener.exitCodeCommands(self)
def codeCommands(self):
    """Parse ``codeCommands``: dispatch on the lookahead token to one of the
    five sub-rules — units (32), inputs (29), outputs (30), codegen (48 = ID)
    or commands (31/33).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.CodeCommandsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_codeCommands)
    try:
        self.state = 275
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [32]:
            self.enterOuterAlt(localctx, 1)
            self.state = 270
            self.units()
            pass
        elif token in [29]:
            self.enterOuterAlt(localctx, 2)
            self.state = 271
            self.inputs()
            pass
        elif token in [30]:
            self.enterOuterAlt(localctx, 3)
            self.state = 272
            self.outputs()
            pass
        elif token in [48]:
            self.enterOuterAlt(localctx, 4)
            self.state = 273
            self.codegen()
            pass
        elif token in [31, 33]:
            self.enterOuterAlt(localctx, 5)
            self.state = 274
            self.commands()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SettingsContext(ParserRuleContext):
    """Parse-tree node for a settings statement (``ID`` plus optional value)."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ID(self, i:int=None):
        # No index -> every ID token; otherwise the i-th.
        return self.getTokens(AutolevParser.ID) if i is None else self.getToken(AutolevParser.ID, i)

    def EXP(self):
        return self.getToken(AutolevParser.EXP, 0)

    def FLOAT(self):
        return self.getToken(AutolevParser.FLOAT, 0)

    def INT(self):
        return self.getToken(AutolevParser.INT, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_settings

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterSettings"):
            listener.enterSettings(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitSettings"):
            listener.exitSettings(self)
def settings(self):
    """Parse ``settings``: an ID optionally followed by one value token.

    The optional value is decided by adaptive prediction (decision 30) and,
    when taken, must belong to the token set encoded in the 64-bit bitmask
    below (the bit for each allowed token type is set in 404620279021568).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.SettingsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_settings)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 277
        self.match(AutolevParser.ID)
        self.state = 279
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,30,self._ctx)
        if la_ == 1:
            self.state = 278
            _la = self._input.LA(1)
            # Inline set-match against the bitmask of permitted value tokens.
            if not(((_la) & ~0x3f) == 0 and ((1 << _la) & 404620279021568) != 0):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class UnitsContext(ParserRuleContext):
    """Parse-tree node for a ``UnitSystem`` declaration."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def UnitSystem(self):
        return self.getToken(AutolevParser.UnitSystem, 0)

    def ID(self, i:int=None):
        # No index -> every ID token; otherwise the i-th.
        return self.getTokens(AutolevParser.ID) if i is None else self.getToken(AutolevParser.ID, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_units

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterUnits"):
            listener.enterUnits(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitUnits"):
            listener.exitUnits(self)
def units(self):
    """Parse ``units``: the ``UnitSystem`` keyword followed by one or more
    comma-separated IDs.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.UnitsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 36, self.RULE_units)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 281
        self.match(AutolevParser.UnitSystem)
        self.state = 282
        self.match(AutolevParser.ID)
        self.state = 287
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # ( ',' ID )* — token type 10 is the separator.
        while _la==10:
            self.state = 283
            self.match(AutolevParser.T__9)
            self.state = 284
            self.match(AutolevParser.ID)
            self.state = 289
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class InputsContext(ParserRuleContext):
    """Parse-tree node for an ``Input`` statement."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def Input(self):
        return self.getToken(AutolevParser.Input, 0)

    def inputs2(self, i:int=None):
        # No index -> all inputs2 children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.Inputs2Context)
        return self.getTypedRuleContext(AutolevParser.Inputs2Context, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_inputs

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterInputs"):
            listener.enterInputs(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitInputs"):
            listener.exitInputs(self)
def inputs(self):
    """Parse ``inputs``: the ``Input`` keyword followed by one or more
    comma-separated ``inputs2`` assignments.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.InputsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 38, self.RULE_inputs)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 290
        self.match(AutolevParser.Input)
        self.state = 291
        self.inputs2()
        self.state = 296
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # ( ',' inputs2 )* — token type 10 is the separator.
        while _la==10:
            self.state = 292
            self.match(AutolevParser.T__9)
            self.state = 293
            self.inputs2()
            self.state = 298
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Id_diffContext(ParserRuleContext):
    """Parse-tree node for an identifier with an optional differentiation mark."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ID(self):
        return self.getToken(AutolevParser.ID, 0)

    def diff(self):
        return self.getTypedRuleContext(AutolevParser.DiffContext, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_id_diff

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterId_diff"):
            listener.enterId_diff(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitId_diff"):
            listener.exitId_diff(self)
def id_diff(self):
    """Parse ``id_diff``: an ID optionally followed by a ``diff`` suffix
    (taken when the lookahead token is type 11).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.Id_diffContext(self, self._ctx, self.state)
    self.enterRule(localctx, 40, self.RULE_id_diff)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 299
        self.match(AutolevParser.ID)
        self.state = 301
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==11:
            self.state = 300
            self.diff()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Inputs2Context(ParserRuleContext):
    """Parse-tree node for a single input assignment ``id_diff = expr (expr)?``."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def id_diff(self):
        return self.getTypedRuleContext(AutolevParser.Id_diffContext, 0)

    def expr(self, i:int=None):
        # No index -> all expr children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_inputs2

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterInputs2"):
            listener.enterInputs2(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitInputs2"):
            listener.exitInputs2(self)
def inputs2(self):
    """Parse ``inputs2``: ``id_diff = expr`` with an optional trailing
    expression (e.g. a units annotation), decided by adaptive prediction
    (decision 34).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.Inputs2Context(self, self._ctx, self.state)
    self.enterRule(localctx, 42, self.RULE_inputs2)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 303
        self.id_diff()
        self.state = 304
        self.match(AutolevParser.T__2)
        self.state = 305
        self.expr(0)
        self.state = 307
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
        if la_ == 1:
            self.state = 306
            self.expr(0)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OutputsContext(ParserRuleContext):
    """Parse-tree node for an ``Output`` statement."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def Output(self):
        return self.getToken(AutolevParser.Output, 0)

    def outputs2(self, i:int=None):
        # No index -> all outputs2 children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.Outputs2Context)
        return self.getTypedRuleContext(AutolevParser.Outputs2Context, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_outputs

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterOutputs"):
            listener.enterOutputs(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitOutputs"):
            listener.exitOutputs(self)
def outputs(self):
    """Parse ``outputs``: the ``Output`` keyword followed by one or more
    comma-separated ``outputs2`` entries.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.OutputsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_outputs)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 309
        self.match(AutolevParser.Output)
        self.state = 310
        self.outputs2()
        self.state = 315
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # ( ',' outputs2 )* — token type 10 is the separator.
        while _la==10:
            self.state = 311
            self.match(AutolevParser.T__9)
            self.state = 312
            self.outputs2()
            self.state = 317
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Outputs2Context(ParserRuleContext):
    """Parse-tree node for a single output entry ``expr (expr)?``."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def expr(self, i:int=None):
        # No index -> all expr children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_outputs2

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterOutputs2"):
            listener.enterOutputs2(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitOutputs2"):
            listener.exitOutputs2(self)
def outputs2(self):
    """Parse ``outputs2``: an expression with an optional trailing expression,
    decided by adaptive prediction (decision 36).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.Outputs2Context(self, self._ctx, self.state)
    self.enterRule(localctx, 46, self.RULE_outputs2)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 318
        self.expr(0)
        self.state = 320
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,36,self._ctx)
        if la_ == 1:
            self.state = 319
            self.expr(0)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CodegenContext(ParserRuleContext):
    """Parse-tree node for a code-generation directive."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ID(self, i:int=None):
        # No index -> every ID token; otherwise the i-th.
        return self.getTokens(AutolevParser.ID) if i is None else self.getToken(AutolevParser.ID, i)

    def functionCall(self):
        return self.getTypedRuleContext(AutolevParser.FunctionCallContext, 0)

    def matrixInOutput(self, i:int=None):
        # No index -> all matrixInOutput children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.MatrixInOutputContext)
        return self.getTypedRuleContext(AutolevParser.MatrixInOutputContext, i)

    def getRuleIndex(self):
        return AutolevParser.RULE_codegen

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterCodegen"):
            listener.enterCodegen(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitCodegen"):
            listener.exitCodegen(self)
def codegen(self):
    """Parse ``codegen``: ``ID functionCall`` with an optional bracketed
    comma-separated ``matrixInOutput`` list, then a target ``ID . ID`` pair
    (T__19 separating the two trailing IDs, presumably a filename.extension —
    TODO confirm against the Autolev grammar).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.CodegenContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_codegen)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 322
        self.match(AutolevParser.ID)
        self.state = 323
        self.functionCall()
        self.state = 335
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional T__0-delimited matrixInOutput list (token type 1 opens it).
        if _la==1:
            self.state = 324
            self.match(AutolevParser.T__0)
            self.state = 325
            self.matrixInOutput()
            self.state = 330
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # ( ',' matrixInOutput )* — token type 10 is the separator.
            while _la==10:
                self.state = 326
                self.match(AutolevParser.T__9)
                self.state = 327
                self.matrixInOutput()
                self.state = 332
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 333
            self.match(AutolevParser.T__1)
        self.state = 337
        self.match(AutolevParser.ID)
        self.state = 338
        self.match(AutolevParser.T__19)
        self.state = 339
        self.match(AutolevParser.ID)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CommandsContext(ParserRuleContext):
    """Parse-tree node for a ``Save`` or ``Encode`` command."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def Save(self):
        return self.getToken(AutolevParser.Save, 0)

    def ID(self, i:int=None):
        # No index -> every ID token; otherwise the i-th.
        return self.getTokens(AutolevParser.ID) if i is None else self.getToken(AutolevParser.ID, i)

    def Encode(self):
        return self.getToken(AutolevParser.Encode, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_commands

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterCommands"):
            listener.enterCommands(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitCommands"):
            listener.exitCommands(self)
def commands(self):
    """Parse ``commands``: either ``Save ID <T__19> ID`` (token 31) or
    ``Encode ID (, ID)*`` (token 33), chosen on the lookahead token.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.CommandsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_commands)
    self._la = 0 # Token type
    try:
        self.state = 354
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [31]:
            # Alternative 1: Save ID . ID
            self.enterOuterAlt(localctx, 1)
            self.state = 341
            self.match(AutolevParser.Save)
            self.state = 342
            self.match(AutolevParser.ID)
            self.state = 343
            self.match(AutolevParser.T__19)
            self.state = 344
            self.match(AutolevParser.ID)
            pass
        elif token in [33]:
            # Alternative 2: Encode ID ( ',' ID )*
            self.enterOuterAlt(localctx, 2)
            self.state = 345
            self.match(AutolevParser.Encode)
            self.state = 346
            self.match(AutolevParser.ID)
            self.state = 351
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==10:
                self.state = 347
                self.match(AutolevParser.T__9)
                self.state = 348
                self.match(AutolevParser.ID)
                self.state = 353
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class VecContext(ParserRuleContext):
    """Parse-tree node for a vector/dyadic reference."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ID(self):
        return self.getToken(AutolevParser.ID, 0)

    def getRuleIndex(self):
        return AutolevParser.RULE_vec

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterVec"):
            listener.enterVec(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitVec"):
            listener.exitVec(self)
def vec(self):
    """Parse ``vec``: one of three alternatives chosen on the lookahead —
    an ID followed by one or more T__20 suffix tokens (token 48 = ID,
    loop count decided by adaptive prediction, decision 41), or a single
    T__21 (token 22), or a single T__22 (token 23).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = AutolevParser.VecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_vec)
    try:
        self.state = 364
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [48]:
            # Alternative 1: ID (T__20)+  — a positive closure driven by
            # adaptivePredict; _alt == 1 keeps consuming suffix tokens.
            self.enterOuterAlt(localctx, 1)
            self.state = 356
            self.match(AutolevParser.ID)
            self.state = 358
            self._errHandler.sync(self)
            _alt = 1
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt == 1:
                    self.state = 357
                    self.match(AutolevParser.T__20)
                else:
                    raise NoViableAltException(self)
                self.state = 360
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,41,self._ctx)
            pass
        elif token in [22]:
            self.enterOuterAlt(localctx, 2)
            self.state = 362
            self.match(AutolevParser.T__21)
            pass
        elif token in [23]:
            self.enterOuterAlt(localctx, 3)
            self.state = 363
            self.match(AutolevParser.T__22)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExprContext(ParserRuleContext):
    """Base node for every labeled alternative of the ``expr`` rule."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return AutolevParser.RULE_expr

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class ParensContext(ExprContext):
    """``expr`` alternative labeled ``Parens``: a parenthesised sub-expression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expr(self):
        return self.getTypedRuleContext(AutolevParser.ExprContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterParens"):
            listener.enterParens(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitParens"):
            listener.exitParens(self)
class VectorOrDyadicContext(ExprContext):
    """``expr`` alternative labeled ``VectorOrDyadic``: wraps a ``vec`` child."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def vec(self):
        return self.getTypedRuleContext(AutolevParser.VecContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterVectorOrDyadic"):
            listener.enterVectorOrDyadic(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitVectorOrDyadic"):
            listener.exitVectorOrDyadic(self)
class ExponentContext(ExprContext):
    """``expr`` alternative labeled ``Exponent``: two expr operands."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expr(self, i:int=None):
        # No index -> both operands; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterExponent"):
            listener.enterExponent(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitExponent"):
            listener.exitExponent(self)
class MulDivContext(ExprContext):
    """``expr`` alternative labeled ``MulDiv``: two expr operands."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expr(self, i:int=None):
        # No index -> both operands; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterMulDiv"):
            listener.enterMulDiv(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitMulDiv"):
            listener.exitMulDiv(self)
class AddSubContext(ExprContext):
    """``expr`` alternative labeled ``AddSub``: two expr operands."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expr(self, i:int=None):
        # No index -> both operands; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterAddSub"):
            listener.enterAddSub(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitAddSub"):
            listener.exitAddSub(self)
class FloatContext(ExprContext):
    """``expr`` alternative labeled ``Float``: a single FLOAT literal."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def FLOAT(self):
        return self.getToken(AutolevParser.FLOAT, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterFloat"):
            listener.enterFloat(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitFloat"):
            listener.exitFloat(self)
class IntContext(ExprContext):
    """``expr`` alternative labeled ``Int``: a single INT literal."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def INT(self):
        return self.getToken(AutolevParser.INT, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterInt"):
            listener.enterInt(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitInt"):
            listener.exitInt(self)
class IdEqualsExprContext(ExprContext):
    """``expr`` alternative labeled ``IdEqualsExpr``: assignment-style expression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expr(self, i:int=None):
        # No index -> all expr children; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(AutolevParser.ExprContext)
        return self.getTypedRuleContext(AutolevParser.ExprContext, i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterIdEqualsExpr"):
            listener.enterIdEqualsExpr(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitIdEqualsExpr"):
            listener.exitIdEqualsExpr(self)
class NegativeOneContext(ExprContext):
    """``expr`` alternative labeled ``NegativeOne``: unary negation of an expr."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expr(self):
        return self.getTypedRuleContext(AutolevParser.ExprContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterNegativeOne"):
            listener.enterNegativeOne(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitNegativeOne"):
            listener.exitNegativeOne(self)
class FunctionContext(ExprContext):
    """``expr`` alternative labeled ``Function``: wraps a ``functionCall`` child."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def functionCall(self):
        return self.getTypedRuleContext(AutolevParser.FunctionCallContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterFunction"):
            listener.enterFunction(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitFunction"):
            listener.exitFunction(self)
class RangessContext(ExprContext):
    """``expr`` alternative labeled ``Rangess``: an ID indexed by a ``ranges`` child."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def ranges(self):
        return self.getTypedRuleContext(AutolevParser.RangesContext, 0)

    def ID(self):
        return self.getToken(AutolevParser.ID, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr(listener, "enterRangess"):
            listener.enterRangess(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr(listener, "exitRangess"):
            listener.exitRangess(self)
class ColonContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutolevParser.ExprContext)
else:
return self.getTypedRuleContext(AutolevParser.ExprContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterColon" ):
listener.enterColon(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitColon" ):
listener.exitColon(self)
class IdContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(AutolevParser.ID, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterId" ):
listener.enterId(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitId" ):
listener.exitId(self)
class ExpContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def EXP(self):
return self.getToken(AutolevParser.EXP, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExp" ):
listener.enterExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExp" ):
listener.exitExp(self)
class MatricesContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def matrix(self):
return self.getTypedRuleContext(AutolevParser.MatrixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMatrices" ):
listener.enterMatrices(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMatrices" ):
listener.exitMatrices(self)
class IndexingContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a AutolevParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(AutolevParser.ID, 0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(AutolevParser.ExprContext)
else:
return self.getTypedRuleContext(AutolevParser.ExprContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIndexing" ):
listener.enterIndexing(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIndexing" ):
listener.exitIndexing(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = AutolevParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 54
self.enterRecursionRule(localctx, 54, self.RULE_expr, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 408
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,47,self._ctx)
if la_ == 1:
localctx = AutolevParser.ExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 367
self.match(AutolevParser.EXP)
pass
elif la_ == 2:
localctx = AutolevParser.NegativeOneContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 368
self.match(AutolevParser.T__17)
self.state = 369
self.expr(12)
pass
elif la_ == 3:
localctx = AutolevParser.FloatContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 370
self.match(AutolevParser.FLOAT)
pass
elif la_ == 4:
localctx = AutolevParser.IntContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 371
self.match(AutolevParser.INT)
pass
elif la_ == 5:
localctx = AutolevParser.IdContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 372
self.match(AutolevParser.ID)
self.state = 376
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,43,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 373
self.match(AutolevParser.T__10)
self.state = 378
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,43,self._ctx)
pass
elif la_ == 6:
localctx = AutolevParser.VectorOrDyadicContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 379
self.vec()
pass
elif la_ == 7:
localctx = AutolevParser.IndexingContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 380
self.match(AutolevParser.ID)
self.state = 381
self.match(AutolevParser.T__0)
self.state = 382
self.expr(0)
self.state = 387
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==10:
self.state = 383
self.match(AutolevParser.T__9)
self.state = 384
self.expr(0)
self.state = 389
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 390
self.match(AutolevParser.T__1)
pass
elif la_ == 8:
localctx = AutolevParser.FunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 392
self.functionCall()
pass
elif la_ == 9:
localctx = AutolevParser.MatricesContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 393
self.matrix()
pass
elif la_ == 10:
localctx = AutolevParser.ParensContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 394
self.match(AutolevParser.T__11)
self.state = 395
self.expr(0)
self.state = 396
self.match(AutolevParser.T__12)
pass
elif la_ == 11:
localctx = AutolevParser.RangessContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 399
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==48:
self.state = 398
self.match(AutolevParser.ID)
self.state = 401
self.ranges()
self.state = 405
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,46,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 402
self.match(AutolevParser.T__10)
self.state = 407
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,46,self._ctx)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 427
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,49,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 425
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,48,self._ctx)
if la_ == 1:
localctx = AutolevParser.ExponentContext(self, AutolevParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 410
if not self.precpred(self._ctx, 16):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 16)")
self.state = 411
self.match(AutolevParser.T__23)
self.state = 412
self.expr(17)
pass
elif la_ == 2:
localctx = AutolevParser.MulDivContext(self, AutolevParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 413
if not self.precpred(self._ctx, 15):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 15)")
self.state = 414
_la = self._input.LA(1)
if not(_la==25 or _la==26):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 415
self.expr(16)
pass
elif la_ == 3:
localctx = AutolevParser.AddSubContext(self, AutolevParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 416
if not self.precpred(self._ctx, 14):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 14)")
self.state = 417
_la = self._input.LA(1)
if not(_la==17 or _la==18):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 418
self.expr(15)
pass
elif la_ == 4:
localctx = AutolevParser.IdEqualsExprContext(self, AutolevParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 419
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 420
self.match(AutolevParser.T__2)
self.state = 421
self.expr(4)
pass
elif la_ == 5:
localctx = AutolevParser.ColonContext(self, AutolevParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 422
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 423
self.match(AutolevParser.T__15)
self.state = 424
self.expr(3)
pass
self.state = 429
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,49,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[27] = self.expr_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 16)
if predIndex == 1:
return self.precpred(self._ctx, 15)
if predIndex == 2:
return self.precpred(self._ctx, 14)
if predIndex == 3:
return self.precpred(self._ctx, 3)
if predIndex == 4:
return self.precpred(self._ctx, 2)
| AutolevParser |
python | pandas-dev__pandas | pandas/tests/series/test_subclass.py | {
"start": 2131,
"end": 2667
} | class ____(pd.Series):
@property
def _constructor(self):
def _new(*args, **kwargs):
# some constructor logic that accesses the Series' name
if self.name == "test":
return pd.Series(*args, **kwargs)
return SubclassedSeries(*args, **kwargs)
return _new
def test_constructor_from_dict():
# https://github.com/pandas-dev/pandas/issues/52445
result = SubclassedSeries({"a": 1, "b": 2, "c": 3})
assert isinstance(result, SubclassedSeries)
| SubclassedSeries |
python | django__django | tests/gis_tests/geoadmin/models.py | {
"start": 70,
"end": 270
} | class ____(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
class Meta:
app_label = "geoadmin"
def __str__(self):
return self.name
| City |
python | getsentry__sentry | src/sentry/backup/comparators.py | {
"start": 13935,
"end": 15516
} | class ____(JSONScrubbingComparator, ABC):
"""Comparator that compares private values, but then safely truncates them to ensure that they
do not leak out in logs, stack traces, etc."""
def __init__(self, *fields: str):
super().__init__(*fields)
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
findings = []
fields = sorted(self.fields)
for f in fields:
if left["fields"].get(f) is None and right["fields"].get(f) is None:
continue
lv = left["fields"][f]
rv = right["fields"][f]
if lv != rv:
lv = self.truncate([lv] if not isinstance(lv, list) else lv)[0]
rv = self.truncate([rv] if not isinstance(rv, list) else rv)[0]
findings.append(
ComparatorFinding(
kind=self.get_kind(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"""the left value ("{lv}") of `{f}` was not equal to the right value ("{rv}")""",
)
)
return findings
def scrub(
self,
left: Any,
right: Any,
) -> None:
super().__scrub__(left, right, self.truncate)
@abstractmethod
def truncate(self, data: list[str]) -> list[str]:
"""An abstract method signature which implements a specific truncation algorithm to do the
actual obfuscation."""
| ObfuscatingComparator |
python | mlflow__mlflow | mlflow/exceptions.py | {
"start": 5586,
"end": 5689
} | class ____(MlflowException):
"""Exception thrown when executing a project fails"""
| ExecutionException |
python | scipy__scipy | scipy/interpolate/tests/test_gil.py | {
"start": 107,
"end": 1831
} | class ____:
"""Check if the GIL is properly released by scipy.interpolate functions."""
def setup_method(self):
self.messages = []
def log(self, message):
self.messages.append(message)
def make_worker_thread(self, target, args):
log = self.log
class WorkerThread(threading.Thread):
def run(self):
log('interpolation started')
target(*args)
log('interpolation complete')
return WorkerThread()
@pytest.mark.xslow
@pytest.mark.xfail(reason='race conditions, may depend on system load')
def test_rectbivariatespline(self):
def generate_params(n_points):
x = y = np.linspace(0, 1000, n_points)
x_grid, y_grid = np.meshgrid(x, y)
z = x_grid * y_grid
return x, y, z
def calibrate_delay(requested_time):
for n_points in itertools.count(5000, 1000):
args = generate_params(n_points)
time_started = time.time()
interpolate(*args)
if time.time() - time_started > requested_time:
return args
def interpolate(x, y, z):
scipy.interpolate.RectBivariateSpline(x, y, z)
args = calibrate_delay(requested_time=3)
worker_thread = self.make_worker_thread(interpolate, args)
worker_thread.start()
for i in range(3):
time.sleep(0.5)
self.log('working')
worker_thread.join()
assert self.messages == [
'interpolation started',
'working',
'working',
'working',
'interpolation complete',
]
| TestGIL |
python | eth-brownie__brownie | brownie/typing.py | {
"start": 3869,
"end": 3951
} | class ____(TypedDict):
outputSelection: OutputSelection
@final
| _CompilerSettings |
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/checkpoint_writer.py | {
"start": 1325,
"end": 1654
} | class ____:
"""
Configuration options for the CheckpointWriter.
Attributes:
write_barrier_timeout_secs: Maximum time in seconds to wait for all ranks
to reach the checkpoint barrier before timing out. Default is 600 seconds.
"""
write_barrier_timeout_secs: int = 600
| CheckpointWriterConfig |
python | astropy__astropy | astropy/coordinates/representation/base.py | {
"start": 22462,
"end": 23262
} | class ____(BaseRepresentationOrDifferentialInfo):
@property
def _represent_as_dict_attrs(self):
attrs = super()._represent_as_dict_attrs
if self._parent._differentials:
attrs += ("differentials",)
return attrs
def _represent_as_dict(self, attrs=None):
out = super()._represent_as_dict(attrs)
for key, value in out.pop("differentials", {}).items():
out[f"differentials.{key}"] = value
return out
def _construct_from_dict(self, map):
differentials = {}
for key in list(map.keys()):
if key.startswith("differentials."):
differentials[key[14:]] = map.pop(key)
map["differentials"] = differentials
return super()._construct_from_dict(map)
| RepresentationInfo |
python | astropy__astropy | astropy/coordinates/tests/test_frames.py | {
"start": 5697,
"end": 58381
} | class ____:
def test_wrong_units_for_v_sun(self, frame, vel_name, extra_kwargs):
# Regression test for gh-17969. No unit and wrong unit give different errors.
kwargs = {vel_name: [12.0, 11.0, 10.0]} | extra_kwargs
with pytest.raises(TypeError, match="set.*not have a unit"):
frame(**kwargs)
kwargs[vel_name] *= u.km
with pytest.raises(
u.UnitConversionError, match="'km'.*and 'km / s'.*convertible"
):
frame(**kwargs)
def test_using_differential_representation(self, frame, vel_name, extra_kwargs):
# For backward compatibility, allow DifferentialRepresentation.
kwargs = {vel_name: [12.0, 11.0, 10.0] * u.km / u.s} | extra_kwargs
exp = frame(**kwargs)
kwargs[vel_name] = r.CartesianDifferential(kwargs[vel_name])
got = frame(**kwargs)
assert got == exp
def test_create_data_frames():
# from repr
i1 = ICRS(r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2 * u.deg))
# from preferred name
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
i4 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.0] * u.deg
def test_create_orderered_data():
TOL = 1e-10 * u.deg
i = ICRS(1 * u.deg, 2 * u.deg)
assert (i.ra - 1 * u.deg) < TOL
assert (i.dec - 2 * u.deg) < TOL
g = Galactic(1 * u.deg, 2 * u.deg)
assert (g.l - 1 * u.deg) < TOL
assert (g.b - 2 * u.deg) < TOL
a = AltAz(1 * u.deg, 2 * u.deg)
assert (a.az - 1 * u.deg) < TOL
assert (a.alt - 2 * u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1 * u.deg, 2 * u.deg, 1 * u.deg, 2 * u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
ICRS(sph, 1 * u.deg, 2 * u.deg)
def test_create_nodata_frames():
i = ICRS()
assert len(i.frame_attributes) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_defaults()["equinox"]
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_defaults()["equinox"]
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (
FK4.get_frame_attr_defaults()["obstime"],
FK4.get_frame_attr_defaults()["equinox"],
)
def test_no_data_nonscalar_frames():
a1 = AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3, 1)) * u.deg_C,
)
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError, match=r".*inconsistent shapes.*"):
AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3,)) * u.deg_C,
)
def test_frame_repr():
i = ICRS()
assert repr(i) == "<ICRS Frame>"
f5 = FK5()
assert repr(f5).startswith("<FK5 Frame (equinox=")
i2 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
assert repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n (1., 2.)>"
assert (
repr(i3)
== "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n (1., 2., 3.)>"
)
# try with arrays
i2 = ICRS(ra=[1.1, 2.1] * u.deg, dec=[2.1, 3.1] * u.deg)
i3 = ICRS(
ra=[1.1, 2.1] * u.deg, dec=[-15.6, 17.1] * u.deg, distance=[11.0, 21.0] * u.kpc
)
assert (
repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n [(1.1, 2.1), (2.1, 3.1)]>"
)
assert (
repr(i3) == "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n"
" [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>"
)
def test_frame_repr_vels():
i = ICRS(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=2 * u.marcsec / u.yr,
)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert (
repr(i) == "<ICRS Coordinate: (ra, dec) in deg\n"
" (1., 2.)\n"
" (pm_ra_cosdec, pm_dec) in mas / yr\n"
" (1., 2.)>"
)
def test_converting_units():
# this is a regular expression that with split (see below) removes what's
# the decimal point to fix rounding problems
rexrepr = re.compile(r"(.*?=\d\.).*?( .*?=\d\.).*?( .*)")
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.0 * u.deg, dec=2.0 * u.deg)
i2_many = ICRS(ra=[2.0, 4.0] * u.deg, dec=[2.0, -8.1] * u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5()).transform_to(ICRS())
i4_many = i2_many.transform_to(FK5()).transform_to(ICRS())
ri2 = "".join(rexrepr.split(repr(i2)))
ri4 = "".join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = "".join(rexrepr.split(repr(i2_many)))
ri4_many = "".join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra", u.hourangle),
RepresentationMapping("lat", "dec", None),
RepresentationMapping("distance", "distance"),
] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = "".join(rexrepr.split(repr(i2)))
rfi = "".join(rexrepr.split(repr(fi)))
rfi = re.sub("FakeICRS", "ICRS", rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "rara", u.hourangle),
RepresentationMapping("lat", "decdec", u.degree),
RepresentationMapping("distance", "distance", u.kpc),
]
}
i1 = NewICRS1(
rara=10 * u.degree,
decdec=-12 * u.deg,
distance=1000 * u.pc,
pm_rara_cosdecdec=100 * u.mas / u.yr,
pm_decdec=17 * u.mas / u.yr,
radial_velocity=10 * u.km / u.s,
)
assert allclose(i1.rara, 10 * u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.distance, 1000 * u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(
r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential
)
assert allclose(i1.rara, 10 * u.deg)
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ang1", u.hourangle),
RepresentationMapping("lat", "ang2", u.degree),
RepresentationMapping("distance", "howfar", u.kpc),
]
}
i2 = NewICRS2(ang1=10 * u.degree, ang2=-12 * u.deg, howfar=1000 * u.pc)
assert allclose(i2.ang1, 10 * u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12 * u.deg)
assert allclose(i2.howfar, 1000 * u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping("d_lon_coslat", "pm_ang1", u.hourangle / u.year),
RepresentationMapping("d_lat", "pm_ang2"),
RepresentationMapping("d_distance", "vlos", u.kpc / u.Myr),
]
}
i3 = NewICRS3(
lon=10 * u.degree,
lat=-12 * u.deg,
distance=1000 * u.pc,
pm_ang1=1 * u.mas / u.yr,
pm_ang2=2 * u.mas / u.yr,
vlos=100 * u.km / u.s,
)
assert allclose(i3.pm_ang1, 1 * u.mas / u.yr)
assert i3.pm_ang1.unit == u.hourangle / u.year
assert allclose(i3.pm_ang2, 2 * u.mas / u.yr)
assert allclose(i3.vlos, 100 * u.km / u.s)
assert i3.vlos.unit == u.kpc / u.Myr
def test_realizing():
rep = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time("J2001"))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_defaults()["equinox"]
# Check that a nicer error message is returned:
with pytest.raises(
TypeError, match="Class passed as data instead of a representation"
):
f.realize_frame(f.representation_type)
def test_replicating():
i = ICRS(ra=[1] * u.deg, dec=[2] * u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
i.data._lat[:] = 0 * u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not i.isscalar
assert i.shape == (1,)
assert len(i) == 1
assert not iclone.has_data
assert iclone.isscalar
assert iclone.shape == ()
with pytest.raises(TypeError, match="no len()"):
len(iclone)
aa = AltAz(alt=1 * u.deg, az=2 * u.deg, obstime=Time("J2000"))
aaclone = aa.replicate_without_data(obstime=Time(["J2001"]))
assert aa.has_data
assert aa.isscalar
assert aa.shape == ()
assert not aaclone.has_data
assert not aaclone.isscalar
assert aaclone.shape == (1,)
assert len(aaclone) == 1
assert not np.any(aa.obstime == aaclone.obstime)
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
rep = r.SphericalRepresentation(
[1, 2, 3] * u.deg, [4, 5, 6] * u.deg, [7, 8, 9] * u.kpc
)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
This test just makes sure the transform architecture works, but does *not*
actually test all the builtin transforms themselves are accurate.
"""
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f4 = f.transform_to(FK4())
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4().equinox
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i.transform_to(ICRS())
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f2 = f.transform_to(FK5()) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i1.transform_to(Galactic()).transform_to(ICRS())
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
# Also checks that shape and length of all make sense.
times = Time("2016-08-23") + np.linspace(0, 10, 12) * u.day
coo1 = ICRS(
ra=[[0.0], [10.0], [20.0]] * u.deg, dec=[[-30.0], [30.0], [60.0]] * u.deg
)
assert coo1.shape == (3, 1)
assert len(coo1) == 3
fk5 = FK5(equinox=times)
assert fk5.shape == (12,)
assert len(fk5) == 12
coo2 = coo1.transform_to(fk5)
assert coo2.shape == (3, 12)
assert len(coo2) == 3
def test_setitem_no_velocity():
"""Test different flavors of item setting for a Frame without a velocity."""
obstime = "B1955"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = sc0.copy()
sc1_repr = repr(sc1)
assert "representation" in sc1.cache
sc1[1] = sc2[0]
assert sc1.cache == {}
assert repr(sc2) != sc1_repr
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
# Works for array-valued obstime so long as they are considered equivalent
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, obstime])
sc1[0] = sc2[0]
# Multidimensional coordinates
sc1 = FK4([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc2 = FK4([[10, 20], [30, 40]] * u.deg, [[50, 60], [70, 80]] * u.deg)
sc1[0] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [[10, 20], [3, 4]])
assert np.allclose(sc1.dec.to_value(u.deg), [[50, 60], [7, 8]])
def test_setitem_velocities():
"""Test different flavors of item setting for a Frame with a velocity."""
sc0 = FK4(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
)
sc2 = FK4(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
obstime = "B1950"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = Galactic(sc0.ra, sc0.dec)
with pytest.raises(
TypeError, match="can only set from object of same class: Galactic vs. FK4"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra[0], sc0.dec[0], obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
sc1 = FK4(obstime=obstime)
with pytest.raises(ValueError, match="cannot set frame which has no data"):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
# Wrong shape
sc1 = FK4([sc0.ra], [sc0.dec], obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
c = FK4(1 * u.deg, 2 * u.deg, equinox="J2001.5", obstime="2000-01-01 12:00:00")
assert c.equinox == Time("J2001.5")
assert c.obstime == Time("2000-01-01 12:00:00")
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert "Invalid time input" in str(err.value)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime="hello")
assert "Invalid time input" in str(err.value)
# A vector time should work if the shapes match, and we automatically
# broadcast the basic data.
c = FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=["J2000", "J2001"])
assert c.shape == (2,)
c = FK4(1 * u.deg, 2 * u.deg, obstime=["J2000", "J2001"])
assert c.shape == (2,)
# If the shapes are not broadcastable, then we should raise an exception.
with pytest.raises(ValueError, match=r".*inconsistent shapes.*"):
FK4([1, 2, 3] * u.deg, [4, 5, 6] * u.deg, obstime=["J2000", "J2001"])
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
c1 = FK5(ra=1 * u.deg, dec=1 * u.deg)
c2 = FK5(
ra=1 * u.deg, dec=1 * u.deg, equinox=FK5.get_frame_attr_defaults()["equinox"]
)
c3 = FK5(ra=1 * u.deg, dec=1 * u.deg, equinox=Time("J2001.5"))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default("equinox")
assert not c2.is_frame_attr_default("equinox")
assert not c3.is_frame_attr_default("equinox")
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
assert c4.is_frame_attr_default("equinox")
assert not c5.is_frame_attr_default("equinox")
def test_altaz_attributes():
    """Check AltAz frame attribute defaults and explicit settings."""
    # Defaults: neither obstime nor location is set.
    frame = AltAz(1 * u.deg, 2 * u.deg)
    assert frame.obstime is None
    assert frame.location is None
    # A time string passed as obstime is converted to a Time object.
    frame_with_time = AltAz(1 * u.deg, 2 * u.deg, obstime="J2000")
    assert frame_with_time.obstime == Time("J2000")
    # A location attribute is stored as an EarthLocation.
    frame_with_loc = AltAz(
        1 * u.deg, 2 * u.deg, location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
    )
    assert isinstance(frame_with_loc.location, EarthLocation)
def test_hadec_attributes():
    """Check HADec frame components, attribute defaults, and hour-angle wrap."""
    hd = HADec(1 * u.hourangle, 2 * u.deg)
    assert hd.ha == 1.0 * u.hourangle
    assert hd.dec == 2 * u.deg
    # Defaults: neither obstime nor location is set.
    assert hd.obstime is None
    assert hd.location is None
    hd2 = HADec(
        23 * u.hourangle,
        -2 * u.deg,
        obstime="J2000",
        location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m),
    )
    # 23h comes back as -1h: the hour angle is wrapped (apparently at 12h).
    assert_allclose(hd2.ha, -1 * u.hourangle)
    assert hd2.dec == -2 * u.deg
    assert hd2.obstime == Time("J2000")
    assert isinstance(hd2.location, EarthLocation)
    # The wrap must survive conversion to a plain spherical representation.
    sr = hd2.represent_as(r.SphericalRepresentation)
    assert_allclose(sr.lon, -1 * u.hourangle)
def test_itrs_earth_location():
    """Round-trip EarthLocation <-> ITRS, both geocentric and topocentric."""
    loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
    sat = EarthLocation(
        lat=-24.6609379 * u.deg, lon=160.34199789 * u.deg, height=420.17927591 * u.km
    )
    # Geocentric ITRS: earth_location must round-trip exactly (to allclose).
    itrs_geo = sat.get_itrs()
    eloc = itrs_geo.earth_location
    assert_allclose(sat.lon, eloc.lon)
    assert_allclose(sat.lat, eloc.lat)
    assert_allclose(sat.height, eloc.height)
    # Topocentric ITRS (offset from `loc`): earth_location must undo the offset.
    topo_itrs_repr = itrs_geo.cartesian - loc.get_itrs().cartesian
    itrs_topo = ITRS(topo_itrs_repr, location=loc)
    eloc = itrs_topo.earth_location
    assert_allclose(sat.lon, eloc.lon)
    assert_allclose(sat.lat, eloc.lat)
    assert_allclose(sat.height, eloc.height)
    # Same round-trip with a non-default obstime.
    obstime = Time("J2010")  # Anything different from default
    topo_itrs_repr2 = sat.get_itrs(obstime).cartesian - loc.get_itrs(obstime).cartesian
    itrs_topo2 = ITRS(topo_itrs_repr2, location=loc, obstime=obstime)
    eloc2 = itrs_topo2.earth_location
    assert_allclose(sat.lon, eloc2.lon)
    assert_allclose(sat.lat, eloc2.lat)
    assert_allclose(sat.height, eloc2.height)
    # ITRS also accepts a geodetic (WGS84) representation type directly.
    wgs84 = ITRS(325 * u.deg, 2 * u.deg, representation_type="wgs84geodetic")
    assert wgs84.lon == 325 * u.deg
    assert wgs84.lat == 2 * u.deg
    assert wgs84.height == 0.0 * u.m
def test_representation():
    """
    Test the getter and setter properties for `representation`
    """
    # Create the frame object.
    icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
    data = icrs.data
    # Create some representation objects.
    icrs_cart = icrs.cartesian
    icrs_spher = icrs.spherical
    icrs_cyl = icrs.cylindrical
    # Testing when `_representation` set to `CartesianRepresentation`.
    icrs.representation_type = r.CartesianRepresentation
    assert icrs.representation_type == r.CartesianRepresentation
    # Component attributes now follow the cartesian naming.
    assert icrs_cart.x == icrs.x
    assert icrs_cart.y == icrs.y
    assert icrs_cart.z == icrs.z
    # Changing representation_type must not touch the underlying data.
    assert icrs.data == data
    # Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
    for attr in ("ra", "dec", "distance"):
        with pytest.raises(AttributeError) as err:
            getattr(icrs, attr)
        assert "object has no attribute" in str(err.value)
    # Testing when `_representation` set to `CylindricalRepresentation`.
    icrs.representation_type = r.CylindricalRepresentation
    assert icrs.representation_type == r.CylindricalRepresentation
    assert icrs.data == data
    # Testing setter input using text argument for spherical.
    icrs.representation_type = "spherical"
    assert icrs.representation_type is r.SphericalRepresentation
    assert icrs_spher.lat == icrs.dec
    assert icrs_spher.lon == icrs.ra
    assert icrs_spher.distance == icrs.distance
    assert icrs.data == data
    # Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
    for attr in ("x", "y", "z"):
        with pytest.raises(AttributeError) as err:
            getattr(icrs, attr)
        assert "object has no attribute" in str(err.value)
    # Testing setter input using text argument for cylindrical.
    icrs.representation_type = "cylindrical"
    assert icrs.representation_type is r.CylindricalRepresentation
    assert icrs_cyl.rho == icrs.rho
    assert icrs_cyl.phi == icrs.phi
    assert icrs_cyl.z == icrs.z
    assert icrs.data == data
    # Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes.
    for attr in ("ra", "dec", "distance"):
        with pytest.raises(AttributeError) as err:
            getattr(icrs, attr)
        assert "object has no attribute" in str(err.value)
    # Invalid setter inputs: an unknown name, or a class that is not a
    # BaseRepresentation subclass, must both be rejected.
    with pytest.raises(ValueError) as err:
        icrs.representation_type = "WRONG"
    assert "but must be a BaseRepresentation class" in str(err.value)
    with pytest.raises(ValueError) as err:
        icrs.representation_type = ICRS
    assert "but must be a BaseRepresentation class" in str(err.value)
def test_represent_as():
    """represent_as accepts both a representation name and a class."""
    icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
    # String name and class argument must give the same result.
    cart1 = icrs.represent_as("cartesian")
    cart2 = icrs.represent_as(r.CartesianRepresentation)
    assert cart1.x == cart2.x
    assert cart1.y == cart2.y
    assert cart1.z == cart2.z
    # now try with velocities
    icrs = ICRS(
        ra=0 * u.deg,
        dec=0 * u.deg,
        distance=10 * u.kpc,
        pm_ra_cosdec=0 * u.mas / u.yr,
        pm_dec=0 * u.mas / u.yr,
        radial_velocity=1 * u.km / u.s,
    )
    # single string
    rep2 = icrs.represent_as("cylindrical")
    assert isinstance(rep2, r.CylindricalRepresentation)
    # The differential is converted along with the representation.
    assert isinstance(rep2.differentials["s"], r.CylindricalDifferential)
    # TODO: this should probably fail in the future once we figure out a better
    # workaround for dealing with UnitSphericalRepresentation's with
    # RadialDifferential's
    # two classes
    # rep2 = icrs.represent_as(r.CartesianRepresentation,
    #                          r.SphericalCosLatDifferential)
    # assert isinstance(rep2, r.CartesianRepresentation)
    # assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
    # An unknown representation name is rejected.
    with pytest.raises(ValueError):
        icrs.represent_as("odaigahara")
def test_shorthand_representations():
    """Frame shorthand properties carry differentials along with the position."""
    pos = r.CartesianRepresentation([1, 2, 3] * u.pc)
    vel = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
    icrs = ICRS(pos.with_differentials(vel))
    # Cylindrical shorthand converts both position and differential.
    cyl = icrs.cylindrical
    assert isinstance(cyl, r.CylindricalRepresentation)
    assert isinstance(cyl.differentials["s"], r.CylindricalDifferential)
    # Plain spherical shorthand.
    sph = icrs.spherical
    assert isinstance(sph, r.SphericalRepresentation)
    assert isinstance(sph.differentials["s"], r.SphericalDifferential)
    # The "coslat" shorthand returns the same representation class but a
    # cos(lat)-scaled differential.
    sph_coslat = icrs.sphericalcoslat
    assert isinstance(sph_coslat, r.SphericalRepresentation)
    assert isinstance(sph_coslat.differentials["s"], r.SphericalCosLatDifferential)
def test_equal():
    """Frame == / != compares positions (and velocities) element-wise."""
    obstime = "B1955"
    sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
    sc2 = FK4([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
    # Compare arrays and scalars
    eq = sc1 == sc2
    ne = sc1 != sc2
    assert np.all(eq == [True, False])
    assert np.all(ne == [False, True])
    # Scalar comparisons return a plain (numpy) bool, not an array.
    v = sc1[0] == sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert v
    v = sc1[0] != sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert not v
    # Broadcasting
    eq = sc1[0] == sc2
    ne = sc1[0] != sc2
    assert np.all(eq == [True, False])
    assert np.all(ne == [False, True])
    # With diff only in velocity
    sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
    sc2 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
    eq = sc1 == sc2
    ne = sc1 != sc2
    assert np.all(eq == [True, False])
    assert np.all(ne == [False, True])
    v = sc1[0] == sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert v
    v = sc1[0] != sc2[0]
    assert isinstance(v, (bool, np.bool_))
    assert not v
    # Data-less frames compare by frame equivalence (class + attributes).
    assert (FK4() == ICRS()) is False
    assert (FK4() == FK4(obstime="J1999")) is False
def test_equal_exceptions():
    """Frame comparison raises informative errors for incompatible operands."""
    # Shape mismatch
    sc1 = FK4([1, 2, 3] * u.deg, [3, 4, 5] * u.deg)
    with pytest.raises(ValueError, match="cannot compare: shape mismatch"):
        sc1 == sc1[:2]  # noqa: B015
    # Different representation_type
    sc1 = FK4(1, 2, 3, representation_type="cartesian")
    sc2 = FK4(1 * u.deg, 2 * u.deg, 2, representation_type="spherical")
    with pytest.raises(
        TypeError,
        match=(
            "cannot compare: objects must have same "
            "class: CartesianRepresentation vs. SphericalRepresentation"
        ),
    ):
        sc1 == sc2  # noqa: B015
    # Different differential type
    sc1 = FK4(1 * u.deg, 2 * u.deg, radial_velocity=1 * u.km / u.s)
    sc2 = FK4(
        1 * u.deg, 2 * u.deg, pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=1 * u.mas / u.yr
    )
    with pytest.raises(
        TypeError,
        match=(
            "cannot compare: objects must have same "
            "class: RadialDifferential vs. UnitSphericalCosLatDifferential"
        ),
    ):
        sc1 == sc2  # noqa: B015
    # Different frame attribute
    sc1 = FK5(1 * u.deg, 2 * u.deg)
    sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J1999")
    with pytest.raises(
        TypeError,
        match=r"cannot compare: objects must have equivalent "
        r"frames: <FK5 Frame \(equinox=J2000.000\)> "
        r"vs. <FK5 Frame \(equinox=J1999.000\)>",
    ):
        sc1 == sc2  # noqa: B015
    # Different frame
    sc1 = FK4(1 * u.deg, 2 * u.deg)
    sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
    with pytest.raises(
        TypeError,
        match="cannot compare: objects must have equivalent "
        r"frames: <FK4 Frame \(equinox=B1950.000, obstime=B1950.000\)> "
        r"vs. <FK5 Frame \(equinox=J2000.000\)>",
    ):
        sc1 == sc2  # noqa: B015
    # One side with data, the other without, in either order.
    sc1 = FK4(1 * u.deg, 2 * u.deg)
    sc2 = FK4()
    with pytest.raises(
        ValueError, match="cannot compare: one frame has data and the other does not"
    ):
        sc1 == sc2  # noqa: B015
    with pytest.raises(
        ValueError, match="cannot compare: one frame has data and the other does not"
    ):
        sc2 == sc1  # noqa: B015
def test_dynamic_attrs():
    """Component names are dynamic attributes: readable, listed in dir(),
    but not settable; unrelated attributes can still be set freely."""
    c = ICRS(1 * u.deg, 2 * u.deg)
    assert "ra" in dir(c)
    assert "dec" in dir(c)
    # Reading an unknown attribute fails normally.
    with pytest.raises(AttributeError) as err:
        c.blahblah
    assert "object has no attribute 'blahblah'" in str(err.value)
    # Frame components are read-only.
    with pytest.raises(AttributeError) as err:
        c.ra = 1
    assert "Cannot set any frame attribute" in str(err.value)
    # Non-frame attributes can be set like on any Python object.
    c.blahblah = 1
    assert c.blahblah == 1
def test_nodata_error():
    """Accessing .data on a frame without data raises a helpful ValueError."""
    frame = ICRS()
    with pytest.raises(ValueError, match="does not have associated data"):
        frame.data
def test_nodata_len_shape():
    """A data-less frame is scalar: empty shape and no len()."""
    frame = ICRS()
    assert frame.shape == ()
    with pytest.raises(TypeError, match="Scalar.*has no len()"):
        len(frame)
def test_len0_data():
    """Length-zero coordinate data is valid data and must be repr()-able."""
    empty = ICRS([] * u.deg, [] * u.deg)
    assert empty.has_data
    repr(empty)  # must not raise
    assert len(empty) == 0
    assert empty.shape == (0,)
def test_len0_nodata():
    """A frame whose shape comes only from a length-0 attribute has len 0."""
    frame = FK5(equinox=Time([], format="jyear"))
    assert len(frame) == 0
    assert frame.shape == (0,)
def test_quantity_attributes():
    """GCRS observer location/velocity attributes validate units and shape."""
    # make sure we can create a GCRS frame with valid inputs
    GCRS(obstime="J2002", obsgeoloc=[1, 2, 3] * u.km, obsgeovel=[4, 5, 6] * u.km / u.s)
    # make sure it fails for invalid locations or velocities
    with pytest.raises(TypeError):
        GCRS(obsgeoloc=[1, 2, 3])  # no unit
    with pytest.raises(u.UnitsError):
        GCRS(obsgeoloc=[1, 2, 3] * u.km / u.s)  # incorrect unit
    with pytest.raises(ValueError):
        GCRS(obsgeoloc=[1, 3] * u.km)  # incorrect shape
def test_quantity_attribute_default():
    """QuantityAttribute defaults, unit conversion, and shape enforcement."""

    # The default default (yes) is None:
    class MyCoord(BaseCoordinateFrame):
        someval = QuantityAttribute(unit=u.deg)

    frame = MyCoord()
    assert frame.someval is None
    frame = MyCoord(someval=15 * u.deg)
    assert u.isclose(frame.someval, 15 * u.deg)

    # This should work if we don't explicitly pass in a unit, but we pass in a
    # default value with a unit
    class MyCoord2(BaseCoordinateFrame):
        someval = QuantityAttribute(15 * u.deg)

    frame = MyCoord2()
    assert u.isclose(frame.someval, 15 * u.deg)
    # Since here no shape was given, we can set to any shape we like.
    frame = MyCoord2(someval=np.ones(3) * u.deg)
    assert frame.someval.shape == (3,)
    assert np.all(frame.someval == 1 * u.deg)

    # We should also be able to insist on a given shape.
    class MyCoord3(BaseCoordinateFrame):
        someval = QuantityAttribute(unit=u.arcsec, shape=(3,))

    # Input in deg is converted to the attribute's arcsec unit.
    frame = MyCoord3(someval=np.ones(3) * u.deg)
    assert frame.someval.shape == (3,)
    assert frame.someval.unit == u.arcsec
    assert u.allclose(frame.someval.value, 3600.0)
    # The wrong shape raises.
    with pytest.raises(ValueError, match="shape"):
        MyCoord3(someval=1.0 * u.deg)
    # As does the wrong unit.
    with pytest.raises(u.UnitsError):
        MyCoord3(someval=np.ones(3) * u.m)
    # We are allowed a short-cut for zero.
    frame0 = MyCoord3(someval=0)
    assert frame0.someval.shape == (3,)
    assert frame0.someval.unit == u.arcsec
    assert np.all(frame0.someval.value == 0.0)
    # But not if it has the wrong shape.
    with pytest.raises(ValueError, match="shape"):
        MyCoord3(someval=np.zeros(2))

    # This should fail, if we don't pass in a default or a unit
    with pytest.raises(ValueError):

        class MyCoord(BaseCoordinateFrame):
            someval = QuantityAttribute()
def test_eloc_attributes():
    """EarthLocationAttribute converts frames/locations to EarthLocation."""
    el = EarthLocation(lon=12.3 * u.deg, lat=45.6 * u.deg, height=1 * u.km)
    it = ITRS(
        r.SphericalRepresentation(lon=12.3 * u.deg, lat=45.6 * u.deg, distance=1 * u.km)
    )
    gc = GCRS(ra=12.3 * u.deg, dec=45.6 * u.deg, distance=6375 * u.km)
    el1 = AltAz(location=el).location
    assert isinstance(el1, EarthLocation)
    # these should match *exactly* because the EarthLocation
    assert el1.lat == el.lat
    assert el1.lon == el.lon
    assert el1.height == el.height
    el2 = AltAz(location=it).location
    assert isinstance(el2, EarthLocation)
    # these should *not* match because giving something in Spherical ITRS is
    # *not* the same as giving it as an EarthLocation: EarthLocation is on an
    # elliptical geoid. So the longitude should match (because flattening is
    # only along the z-axis), but latitude should not. Also, height is relative
    # to the *surface* in EarthLocation, but the ITRS distance is relative to
    # the center of the Earth
    assert not allclose(el2.lat, it.spherical.lat)
    assert allclose(el2.lon, it.spherical.lon)
    assert el2.height < -6000 * u.km
    el3 = AltAz(location=gc).location
    # GCRS inputs implicitly get transformed to ITRS and then onto
    # EarthLocation's elliptical geoid. So both lat and lon shouldn't match
    assert isinstance(el3, EarthLocation)
    assert not allclose(el3.lat, gc.dec)
    assert not allclose(el3.lon, gc.ra)
    assert np.abs(el3.height) < 500 * u.km
def test_equivalent_frames():
    """is_equivalent_frame: same class + same frame attributes, data ignored."""
    i = ICRS()
    i2 = ICRS(1 * u.deg, 2 * u.deg)
    # Data does not affect frame equivalence.
    assert i.is_equivalent_frame(i)
    assert i.is_equivalent_frame(i2)
    # Non-frame operands (plain objects, SkyCoord) are rejected.
    with pytest.raises(TypeError):
        assert i.is_equivalent_frame(10)
    with pytest.raises(TypeError):
        assert i2.is_equivalent_frame(SkyCoord(i2))
    f0 = FK5()  # this J2000 is TT
    f1 = FK5(equinox="J2000")
    f2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
    f3 = FK5(equinox="J2010")
    f4 = FK4(equinox="J2010")
    assert f1.is_equivalent_frame(f1)
    assert not i.is_equivalent_frame(f1)
    # Default equinox equals an explicit "J2000".
    assert f0.is_equivalent_frame(f1)
    assert f1.is_equivalent_frame(f2)
    # Different equinox, or different frame class, breaks equivalence.
    assert not f1.is_equivalent_frame(f3)
    assert not f3.is_equivalent_frame(f4)
    aa1 = AltAz()
    aa2 = AltAz(obstime="J2010")
    assert aa2.is_equivalent_frame(aa2)
    assert not aa1.is_equivalent_frame(i)
    assert not aa1.is_equivalent_frame(aa2)
def test_equivalent_frame_coordinateattribute():
    """Frames differing in a CoordinateAttribute must not be equivalent."""

    class FrameWithCoordinateAttribute(BaseCoordinateFrame):
        coord_attr = CoordinateAttribute(HCRS)

    # These frames should not be considered equivalent
    f0 = FrameWithCoordinateAttribute()
    f1 = FrameWithCoordinateAttribute(
        coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2000")
    )
    f2 = FrameWithCoordinateAttribute(
        coord_attr=HCRS(3 * u.deg, 4 * u.deg, obstime="J2000")
    )
    f3 = FrameWithCoordinateAttribute(
        coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2001")
    )
    # All pairwise combinations of unset/different-data/different-obstime.
    assert not f0.is_equivalent_frame(f1)
    assert not f1.is_equivalent_frame(f0)
    assert not f1.is_equivalent_frame(f2)
    assert not f1.is_equivalent_frame(f3)
    assert not f2.is_equivalent_frame(f3)
    # They each should still be equivalent to a deep copy of themselves
    assert f0.is_equivalent_frame(deepcopy(f0))
    assert f1.is_equivalent_frame(deepcopy(f1))
    assert f2.is_equivalent_frame(deepcopy(f2))
    assert f3.is_equivalent_frame(deepcopy(f3))
def test_equivalent_frame_locationattribute():
    """Frames differing in an EarthLocationAttribute must not be equivalent."""

    class FrameWithLocationAttribute(BaseCoordinateFrame):
        loc_attr = EarthLocationAttribute()

    no_location = FrameWithLocationAttribute()
    with_location = FrameWithLocationAttribute(
        loc_attr=EarthLocation(lat=-34, lon=19, height=300)
    )
    # Differing attribute values -> not equivalent, in either direction.
    assert not no_location.is_equivalent_frame(with_location)
    assert not with_location.is_equivalent_frame(no_location)
    # Each frame is still equivalent to a deep copy of itself.
    assert no_location.is_equivalent_frame(deepcopy(no_location))
    assert with_location.is_equivalent_frame(deepcopy(with_location))
def test_representation_subclass():
    """Frames must accept (Unit)SphericalRepresentation subclasses.

    Regression test for #3354.
    """
    # Normally when instantiating a frame without a distance the frame will try
    # and use UnitSphericalRepresentation internally instead of
    # SphericalRepresentation.
    frame = FK5(
        representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg
    )
    # Exact-type check: `is` is the idiomatic comparison for classes (E721).
    assert type(frame._data) is r.UnitSphericalRepresentation
    assert frame.representation_type == r.SphericalRepresentation

    # If using a SphericalRepresentation class this used to not work, so we
    # test here that this is now fixed.
    class NewSphericalRepresentation(r.SphericalRepresentation):
        attr_classes = r.SphericalRepresentation.attr_classes

    frame = FK5(
        representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg
    )
    assert type(frame._data) is r.UnitSphericalRepresentation
    assert frame.representation_type == NewSphericalRepresentation
    # A similar issue then happened in __repr__ with subclasses of
    # SphericalRepresentation.
    assert (
        repr(frame)
        == "<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n (32., 20.)>"
    )

    # A more subtle issue is when specifying a custom
    # UnitSphericalRepresentation subclass for the data and
    # SphericalRepresentation or a subclass for the representation.
    class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
        attr_classes = r.UnitSphericalRepresentation.attr_classes

        def __repr__(self):
            return "<NewUnitSphericalRepresentation spam spam spam>"

    frame = FK5(
        NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
        representation_type=NewSphericalRepresentation,
    )
    # The data's own __repr__ hook must be used for the component part.
    assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
    """
    Make sure current representation survives __getitem__ even if different
    from data representation.
    """
    coords = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
    coords.representation_type = "cartesian"
    sliced = coords[0]
    assert sliced.representation_type is r.CartesianRepresentation
def test_component_error_useful():
    """
    Check that a data-less frame gives useful error messages about not having
    data when the attributes asked for are possible coordinate components
    """
    i = ICRS()
    # A real component name ('ra') reports missing data, not AttributeError.
    with pytest.raises(ValueError) as excinfo:
        i.ra
    assert "does not have associated data" in str(excinfo.value)
    # Non-component names get a plain AttributeError.
    with pytest.raises(AttributeError) as excinfo1:
        i.foobar
    with pytest.raises(AttributeError) as excinfo2:
        i.lon  # lon is *not* the component name despite being the underlying representation's name
    assert "object has no attribute 'foobar'" in str(excinfo1.value)
    assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
    """cache.clear() empties the representation cache populated by repr()."""
    i = ICRS(1 * u.deg, 2 * u.deg)
    # Add an in frame units version of the rep to the cache.
    repr(i)
    assert len(i.cache["representation"]) == 2
    i.cache.clear()
    assert len(i.cache["representation"]) == 0
def test_inplace_array():
    """In-place mutation of array data is visible after clearing the cache."""
    i = ICRS([[1, 2], [3, 4]] * u.deg, [[10, 20], [30, 40]] * u.deg)
    # Add an in frame units version of the rep to the cache.
    repr(i)
    # Check that repr() has added a rep to the cache
    assert len(i.cache["representation"]) == 2
    # Modify the data
    i.data.lon[:, 0] = [100, 200] * u.deg
    # Clear the cache
    i.cache.clear()
    # This will use a second (potentially cached rep)
    assert_allclose(i.ra, [[100, 2], [200, 4]] * u.deg)
    assert_allclose(i.dec, [[10, 20], [30, 40]] * u.deg)
def test_inplace_change():
    """In-place mutation of scalar data is visible after clearing the cache."""
    i = ICRS(1 * u.deg, 2 * u.deg)
    # Add an in frame units version of the rep to the cache.
    repr(i)
    # Check that repr() has added a rep to the cache
    assert len(i.cache["representation"]) == 2
    # Modify the data
    i.data.lon[()] = 10 * u.deg
    # Clear the cache
    i.cache.clear()
    # This will use a second (potentially cached rep)
    assert i.ra == 10 * u.deg
    assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
    """A frame rejects data carrying more than one differential."""
    dif1 = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
    dif2 = r.CartesianDifferential([1, 2, 3] * u.km / u.s**2)
    rep = r.CartesianRepresentation(
        [1, 2, 3] * u.pc, differentials={"s": dif1, "s2": dif2}
    )
    # Frames only support a single differential, so this must raise.
    with pytest.raises(ValueError):
        ICRS(rep)
def test_missing_component_error_names():
    """
    This test checks that the component names are frame component names, not
    representation or differential names, when referenced in an exception raised
    when not passing in enough data. For example:
    ICRS(ra=10*u.deg)
    should state:
    TypeError: __init__() missing 1 required positional argument: 'dec'
    """
    with pytest.raises(TypeError) as e:
        ICRS(ra=150 * u.deg)
    assert "missing 1 required positional argument: 'dec'" in str(e.value)
    # The error must mention the frame's name 'pm_ra_cosdec', not the
    # representation-level 'pm_ra'.
    with pytest.raises(TypeError) as e:
        ICRS(
            ra=150 * u.deg,
            dec=-11 * u.deg,
            pm_ra=100 * u.mas / u.yr,
            pm_dec=10 * u.mas / u.yr,
        )
    assert "pm_ra_cosdec" in str(e.value)
def test_non_spherical_representation_unit_creation(unitphysics):  # noqa: F811
    """Omitting the radius falls back to the "unit" representation variant."""

    class PhysicsICRS(ICRS):
        default_representation = r.PhysicsSphericalRepresentation

    # With a radius, the full physics-spherical representation is used.
    pic = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg, r=1 * u.kpc)
    assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
    # Without a radius, the unit variant (from the fixture) is used.
    picu = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg)
    assert isinstance(picu.data, unitphysics)
def test_attribute_repr():
    """An attribute's _astropy_repr_in_frame hook controls the frame repr."""

    class Spam:
        def _astropy_repr_in_frame(self):
            return "TEST REPR"

    class TestFrame(BaseCoordinateFrame):
        attrtest = Attribute(default=Spam())

    assert "TEST REPR" in repr(TestFrame())
def test_component_names_repr():
    """Frame repr uses remapped component names without over-substituting."""

    # Frame class with new component names that includes a name swap
    class NameChangeFrame(BaseCoordinateFrame):
        default_representation = r.PhysicsSphericalRepresentation
        frame_specific_representation_info = {
            r.PhysicsSphericalRepresentation: [
                RepresentationMapping("phi", "theta", u.deg),
                RepresentationMapping("theta", "phi", u.arcsec),
                RepresentationMapping("r", "JUSTONCE", u.AU),
            ]
        }

    frame = NameChangeFrame(0 * u.deg, 0 * u.arcsec, 0 * u.AU)
    # Check for the new names in the Frame repr
    assert "(theta, phi, JUSTONCE)" in repr(frame)
    # Check that the letter "r" has not been replaced more than once in the Frame repr
    assert repr(frame).count("JUSTONCE") == 1
def test_galactocentric_defaults():
    """Check the Galactocentric parameter-set machinery across versions."""
    with galactocentric_frame_defaults.set("pre-v4.0"):
        galcen_pre40 = Galactocentric()
    with galactocentric_frame_defaults.set("v4.0"):
        galcen_40 = Galactocentric()
    with galactocentric_frame_defaults.set("latest"):
        galcen_latest = Galactocentric()
    # parameters that changed
    assert not u.allclose(galcen_pre40.galcen_distance, galcen_40.galcen_distance)
    assert not u.allclose(galcen_pre40.z_sun, galcen_40.z_sun)
    # All v4.0 parameters should match "latest" (at time of writing).
    for k in galcen_40.frame_attributes:
        if isinstance(getattr(galcen_40, k), BaseCoordinateFrame):
            continue  # skip coordinate comparison...
        elif isinstance(getattr(galcen_40, k), CartesianDifferential):
            assert u.allclose(
                getattr(galcen_40, k).d_xyz, getattr(galcen_latest, k).d_xyz
            )
        else:
            assert getattr(galcen_40, k) == getattr(galcen_latest, k)
    # test validate Galactocentric
    with galactocentric_frame_defaults.set("latest"):
        params = galactocentric_frame_defaults.validate(galcen_latest)
        references = galcen_latest.frame_attribute_references
        state = {"parameters": params, "references": references}
        assert galactocentric_frame_defaults.parameters == params
        assert galactocentric_frame_defaults.references == references
        assert galactocentric_frame_defaults._state == state
    # Test not one of accepted parameter types
    with pytest.raises(ValueError):
        galactocentric_frame_defaults.validate(ValueError)
    # test parameters property
    # (two separate accesses must compare equal, i.e. return consistent copies)
    assert (
        galactocentric_frame_defaults.parameters
        == galactocentric_frame_defaults.parameters
    )
def test_galactocentric_references():
    """Every Galactocentric attribute (except overrides) carries a reference."""
    # references in the "scientific paper"-sense
    with galactocentric_frame_defaults.set("pre-v4.0"):
        galcen_pre40 = Galactocentric()
        for k in galcen_pre40.frame_attributes:
            if k == "roll":  # no reference for this parameter
                continue
            assert k in galcen_pre40.frame_attribute_references
    with galactocentric_frame_defaults.set("v4.0"):
        galcen_40 = Galactocentric()
        for k in galcen_40.frame_attributes:
            if k == "roll":  # no reference for this parameter
                continue
            assert k in galcen_40.frame_attribute_references
    with galactocentric_frame_defaults.set("v4.0"):
        # A user-overridden attribute loses its reference; others keep theirs.
        galcen_custom = Galactocentric(z_sun=15 * u.pc)
        for k in galcen_custom.frame_attributes:
            if k == "roll":  # no reference for this parameter
                continue
            if k == "z_sun":
                assert k not in galcen_custom.frame_attribute_references
            else:
                assert k in galcen_custom.frame_attribute_references
def test_coordinateattribute_transformation():
    """CoordinateAttribute transforms inputs to its declared frame."""

    class FrameWithCoordinateAttribute(BaseCoordinateFrame):
        coord_attr = CoordinateAttribute(HCRS)

    hcrs = HCRS(1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-02-03")
    f1_frame = FrameWithCoordinateAttribute(coord_attr=hcrs)
    f1_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(hcrs))
    # The input is already HCRS, so the frame attribute should not change it
    assert f1_frame.coord_attr == hcrs
    # The output should not be different if a SkyCoord is provided
    assert f1_skycoord.coord_attr == f1_frame.coord_attr
    gcrs = GCRS(4 * u.deg, 5 * u.deg, 6 * u.AU, obstime="2004-05-06")
    f2_frame = FrameWithCoordinateAttribute(coord_attr=gcrs)
    f2_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(gcrs))
    # The input needs to be converted from GCRS to HCRS
    assert isinstance(f2_frame.coord_attr, HCRS)
    # The `obstime` frame attribute should have been "merged" in a SkyCoord-style transformation
    assert f2_frame.coord_attr.obstime == gcrs.obstime
    # The output should not be different if a SkyCoord is provided
    assert f2_skycoord.coord_attr == f2_frame.coord_attr
def test_realize_frame_accepts_kwargs():
    """realize_frame forwards frame kwargs such as representation_type."""
    base = ICRS(
        x=1 * u.pc,
        y=2 * u.pc,
        z=3 * u.pc,
        representation_type=r.CartesianRepresentation,
    )
    data = r.CartesianRepresentation(x=11 * u.pc, y=12 * u.pc, z=13 * u.pc)
    # The representation_type keyword is honored on a per-call basis.
    as_cartesian = base.realize_frame(data, representation_type="cartesian")
    as_cylindrical = base.realize_frame(data, representation_type="cylindrical")
    assert as_cartesian.representation_type == r.CartesianRepresentation
    assert as_cylindrical.representation_type == r.CylindricalRepresentation
def test_nameless_frame_subclass():
    """Note: this is a regression test for #11096"""

    class Test:
        pass

    # Subclass from a frame class and a non-frame class.
    # This subclassing is the test!  (It used to raise during class creation.)
    class NewFrame(ICRS, Test):
        pass
def test_frame_coord_comparison():
    """Test that frame can be compared to a SkyCoord"""
    frame = ICRS(0 * u.deg, 0 * u.deg)
    coord = SkyCoord(frame)
    other = SkyCoord(ICRS(0 * u.deg, 1 * u.deg))
    assert frame == coord
    assert frame != other
    assert not (frame == other)
    # Comparing across non-equivalent frames must raise, not return False.
    error_msg = "objects must have equivalent frames"
    with pytest.raises(TypeError, match=error_msg):
        frame == SkyCoord(AltAz("0d", "1d"))  # noqa: B015
    # Same frame class but different equinox is also not equivalent.
    coord = SkyCoord(ra=12 * u.hourangle, dec=5 * u.deg, frame=FK5(equinox="J1950"))
    frame = FK5(ra=12 * u.hourangle, dec=5 * u.deg, equinox="J2000")
    with pytest.raises(TypeError, match=error_msg):
        coord == frame  # noqa: B015
    # A data-less frame cannot be compared to a SkyCoord with data.
    frame = ICRS()
    coord = SkyCoord(0 * u.deg, 0 * u.deg, frame=frame)
    error_msg = "Can only compare SkyCoord to Frame with data"
    with pytest.raises(ValueError, match=error_msg):
        frame == coord  # noqa: B015
@pytest.mark.parametrize(
    ["s1", "s2"],
    (
        ((1,), (1,)),
        ((2,), (1,)),
        ((1,), (2,)),
        ((2,), (2,)),
        ((2, 1), (1,)),
        ((1,), (2, 1)),
        ((2, 1), (1, 3)),
    ),
)
def test_altaz_broadcast(s1, s2):
    """Note: Regression test for #5982"""
    # obstime of shape s1 must broadcast against alt/az of shape s2.
    where = EarthLocation.from_geodetic(lat=45 * u.deg, lon=30 * u.deg, height=0 * u.m)
    time = Time(np.full(s1, 58000.0), format="mjd")
    angle = np.full(s2, 45.0) * u.deg
    result = AltAz(alt=angle, az=angle, obstime=time, location=where)
    assert result.shape == np.broadcast_shapes(s1, s2)
def test_transform_altaz_array_obstime():
    """Note: Regression test for #12965"""
    obstime = Time("2010-01-01T00:00:00")
    location = EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
    frame1 = AltAz(location=location, obstime=obstime)
    coord1 = SkyCoord(alt=80 * u.deg, az=0 * u.deg, frame=frame1)
    # Scalar alt/az combined with an array obstime must broadcast.
    obstimes = obstime + np.linspace(0, 15, 50) * u.min
    frame2 = AltAz(location=location, obstime=obstimes)
    coord2 = SkyCoord(alt=coord1.alt, az=coord1.az, frame=frame2)
    assert np.all(coord2.alt == 80 * u.deg)
    assert np.all(coord2.az == 0 * u.deg)
    assert coord2.shape == (50,)
    # test transformation to ICRS works
    assert len(coord2.icrs) == 50
def test_spherical_offsets_by_broadcast():
    """Note: Regression test for #14383"""
    # Scalar offsets must broadcast against array coordinates.
    coords = SkyCoord(
        ra=np.array([123, 134, 145]), dec=np.array([45, 56, 67]), unit=u.deg
    )
    shifted = coords.spherical_offsets_by(2 * u.deg, 2 * u.deg)
    assert shifted.shape == (3,)
@pytest.mark.parametrize("shape", [(1,), (2,)])
def test_spherical_offsets_with_wrap(shape):
    # see https://github.com/astropy/astropy/issues/16219
    # Offsetting from the pole must preserve the input shape regardless of
    # the offset direction.
    sc = SkyCoord(ra=np.broadcast_to(123.0, shape), dec=90.0, unit=u.deg)
    scop = sc.spherical_offsets_by(+2 * u.deg, 0 * u.deg)
    assert scop.shape == shape
    scom = sc.spherical_offsets_by(-2 * u.deg, 0 * u.deg)
    assert scom.shape == shape
def test_insert():
    """Frame.insert splices scalar or array coordinates at a given index."""
    # Tests are a subset of those in test_sky_coord.
    c0 = ICRS([1, 2] * u.deg, [3, 4] * u.deg)
    c1 = ICRS(5 * u.deg, 6 * u.deg)
    c3 = ICRS([10, 20] * u.deg, [30, 40] * u.deg)
    # Insert a scalar
    c = c0.insert(1, c1)
    assert skycoord_equal(c, ICRS([1, 5, 2] * u.deg, [3, 6, 4] * u.deg))
    # Insert length=2 array at start of array
    c = c0.insert(0, c3)
    assert skycoord_equal(c, ICRS([10, 20, 1, 2] * u.deg, [30, 40, 3, 4] * u.deg))
    # Insert length=2 array at end of array
    c = c0.insert(2, c3)
    assert skycoord_equal(c, ICRS([1, 2, 10, 20] * u.deg, [3, 4, 30, 40] * u.deg))
| TestCartesianVelocity |
python | getsentry__sentry | src/sentry/apidocs/examples/organization_member_examples.py | {
"start": 2123,
"end": 2618
} | class ____:
CREATE_ORG_MEMBER = [
OpenApiExample(
"Add a member to an organization",
value=ORGANIZATION_MEMBER,
status_codes=["201"],
response_only=True,
)
]
LIST_ORG_MEMBERS = [
OpenApiExample(
"List organization members",
value=[ORGANIZATION_MEMBER, INVITED_ORGANIZATION_MEMBER],
status_codes=["200"],
response_only=True,
)
]
| OrganizationMemberExamples |
python | fastai__fastai | fastai/torch_core.py | {
"start": 24171,
"end": 26863
} | class ____(fastuple, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
add_docs(TitledInt, "An `int` with `show`"); add_docs(TitledStr, "An `str` with `show`");
add_docs(TitledFloat, "A `float` with `show`"); add_docs(TitledTuple, "A `fastuple` with `show`")
# %% ../nbs/00_torch_core.ipynb 149
@patch
def truncate(self:TitledStr, n):
"Truncate self to `n`"
words = self.split(' ')[:n]
return TitledStr(' '.join(words))
# %% ../nbs/00_torch_core.ipynb 151
if not hasattr(pd.DataFrame,'_old_init'): pd.DataFrame._old_init = pd.DataFrame.__init__
# %% ../nbs/00_torch_core.ipynb 152
@patch
def __init__(self:pd.DataFrame, data=None, index=None, columns=None, dtype=None, copy=None):
if data is not None and isinstance(data, Tensor): data = to_np(data)
self._old_init(data, index=index, columns=columns, dtype=dtype, copy=copy)
# %% ../nbs/00_torch_core.ipynb 153
def get_empty_df(n):
"Return `n` empty rows of a dataframe"
df = pd.DataFrame(index = range(n))
return [df.iloc[i] for i in range(n)]
# %% ../nbs/00_torch_core.ipynb 154
def display_df(df):
"Display `df` in a notebook or defaults to print"
try: from IPython.display import display, HTML
except: return print(df)
display(HTML(df.to_html()))
# %% ../nbs/00_torch_core.ipynb 155
def get_first(c):
"Get the first element of c, even if c is a dataframe"
return getattr(c, 'iloc', c)[0]
# %% ../nbs/00_torch_core.ipynb 156
def one_param(m):
"First parameter in `m`"
return first(m.parameters())
# %% ../nbs/00_torch_core.ipynb 157
def item_find(x, idx=0):
"Recursively takes the `idx`-th element of `x`"
if is_listy(x): return item_find(x[idx])
if isinstance(x,dict):
key = list(x.keys())[idx] if isinstance(idx, int) else idx
return item_find(x[key])
return x
# %% ../nbs/00_torch_core.ipynb 158
def find_device(b):
"Recursively search the device of `b`."
return item_find(b).device
# %% ../nbs/00_torch_core.ipynb 160
def find_bs(b):
"Recursively search the batch size of `b`."
res = item_find(b)
if not hasattr(res, "shape"): return len(b)
return res.shape[0]
# %% ../nbs/00_torch_core.ipynb 162
def np_func(f):
"Convert a function taking and returning numpy arrays to one taking and returning tensors"
def _inner(*args, **kwargs):
nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args]
return tensor(f(*nargs, **kwargs))
functools.update_wrapper(_inner, f)
return _inner
# %% ../nbs/00_torch_core.ipynb 166
| TitledTuple |
python | kamyu104__LeetCode-Solutions | Python/maximum-depth-of-n-ary-tree.py | {
"start": 146,
"end": 442
} | class ____(object):
def maxDepth(self, root):
"""
:type root: Node
:rtype: int
"""
if not root:
return 0
depth = 0
for child in root.children:
depth = max(depth, self.maxDepth(child))
return 1+depth
| Solution |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 7647,
"end": 7847
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'fips']
valid_subsets = ['fips']
fact_namespace = 'ansible_fips'
collector_class = FipsFactCollector
| TestFipsFacts |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 90513,
"end": 96248
} | class ____:
def test_lowpass(self, xp):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = ellipord(wp, xp.asarray(ws), rp, rs, False)
b, a = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'lp', False)
w, h = freqz(b, a)
w /= np.pi
assert np.all(-rp - 0.1 < dB(h[w <= wp]))
assert np.all(dB(h[ws <= w]) < -rs + 0.1)
assert N == 5
xp_assert_close(Wn, xp.asarray(0.2), rtol=1e-15, check_0d=False)
def test_lowpass_1000dB(self, xp):
# failed when ellipkm1 wasn't used in ellipord and ellipap
wp = 0.2
ws = 0.3
rp = 3
rs = 1000
N, Wn = ellipord(wp, xp.asarray(ws), rp, rs, False)
sos = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'lp', False, output='sos')
w, h = freqz_sos(sos)
w /= np.pi
assert np.all(-rp - 0.1 < dB(h[w <= wp]))
assert np.all(dB(h[ws <= w]) < -rs + 0.1)
assert array_namespace(Wn) == xp
def test_highpass(self, xp):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = ellipord(wp, xp.asarray(ws), rp, rs, False)
b, a = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'hp', False)
w, h = freqz(b, a)
w /= np.pi
assert np.all(-rp - 0.1 < dB(h[wp <= w]))
assert np.all(dB(h[w <= ws]) < -rs + 0.1)
assert N == 6
xp_assert_close(Wn, xp.asarray(0.3), rtol=1e-15, check_0d=False)
assert array_namespace(Wn) == xp
def test_bandpass(self, xp):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = ellipord(xp.asarray(wp), xp.asarray(ws), rp, rs, False)
b, a = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'bp', False)
w, h = freqz(b, a)
w /= np.pi
assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1)
assert N == 6
xp_assert_close(Wn, xp.asarray([0.2, 0.5]), rtol=1e-15)
@skip_xp_backends(
cpu_only=True, exceptions=["cupy"], reason="optimize.fminbound"
)
def test_bandstop(self, xp):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = ellipord(xp.asarray(wp), xp.asarray(ws), rp, rs, False)
b, a = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'bs', False)
w, h = freqz(b, a)
w /= np.pi
assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1)
assert N == 7
xp_assert_close(Wn, xp.asarray([0.14758232794342988, 0.6]), rtol=1e-5)
def test_analog(self, xp):
wp = [1000.0, 6000]
ws = [2000.0, 5000]
rp = 3
rs = 90
N, Wn = ellipord(xp.asarray(wp), xp.asarray(ws), rp, rs, True)
b, a = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'bs', True)
w, h = freqs(b, a)
assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1)
assert N == 8
xp_assert_close(Wn, xp.asarray([1666.6666, 6000]))
assert ellipord(xp.asarray(1), 1.2, 1, 80, analog=True)[0] == 9
def test_fs_param(self, xp):
wp = [400.0, 2400]
ws = [800.0, 2000]
rp = 3
rs = 90
fs = 8000
N, Wn = ellipord(xp.asarray(wp), xp.asarray(ws), rp, rs, False, fs=fs)
b, a = ellip(N, rp, rs, _xp_copy_to_numpy(Wn), 'bs', False, fs=fs)
w, h = freqz(b, a, fs=fs)
assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1)
assert N == 7
xp_assert_close(Wn, xp.asarray([590.3293117737195, 2400]), rtol=1e-5)
def test_invalid_input(self):
with pytest.raises(ValueError) as exc_info:
ellipord(0.2, 0.5, 3, 2)
assert "gpass should be smaller than gstop" in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
ellipord(0.2, 0.5, -1, 2)
assert "gpass should be larger than 0.0" in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
ellipord(0.2, 0.5, 1, -2)
assert "gstop should be larger than 0.0" in str(exc_info.value)
def test_ellip_butter(self, xp):
# The purpose of the test is to compare to some known output from past
# scipy versions. The values to compare to are generated with scipy
# 1.9.1 (there is nothing special about this particular version though)
n, Wn = ellipord(xp.asarray([0.1, 0.6]), xp.asarray([0.2, 0.5]), 3, 60)
assert array_namespace(Wn) == xp
assert n == 5
def test_fs_validation(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
with pytest.raises(ValueError, match="Sampling.*single scalar"):
ellipord(wp, ws, rp, rs, False, fs=np.array([10, 20]))
# Currently the filter functions tested below (bessel, butter, cheby1, cheby2,
# and ellip) all return float64 (or complex128) output regardless of input
# dtype. Therefore reference arrays in these tests are all given an explicit 64
# bit dtype, because the output will not match the xp_default_dtype when the
# default dtype is float32. Although the output arrays and all internal
# calculations are in 64 bit precision, tolerances are still loosened for the
# float32 case when results are impacted by reduced precision in the inputs.
@skip_xp_backends("dask.array", reason="https://github.com/dask/dask/issues/11883")
@make_xp_test_case(bessel)
| TestEllipord |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/embeddings/adapter_utils.py | {
"start": 392,
"end": 5013
} | class ____(nn.Module):
"""
Multiple negatives ranking loss.
This loss is similar to the one in sentence_transformers,
but optimized for our own embeddings.
"""
def __init__(
self,
model: BaseAdapter,
scale: float = 20.0,
similarity_fct: Optional[Callable] = None,
):
"""Define ranking loss."""
super().__init__()
self.model = model
self.scale = scale
self.similarity_fct = cos_sim if similarity_fct is None else similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, query_embeds: Tensor, context_embeds: Tensor) -> Tensor:
"""Forward pass."""
# transform context embeds
# context_embeds_2 = self.model.forward(context_embeds)
query_embeds_2 = self.model.forward(query_embeds)
scores = self.similarity_fct(query_embeds_2, context_embeds) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
)
return self.cross_entropy_loss(scores, labels)
def train_model(
model: BaseAdapter,
data_loader: torch.utils.data.DataLoader,
device: torch.device,
epochs: int = 1,
steps_per_epoch: Optional[int] = None,
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = torch.optim.AdamW,
optimizer_params: Dict[str, Any] = {"lr": 2e-5},
output_path: str = "model_output",
max_grad_norm: float = 1,
show_progress_bar: bool = True,
verbose: bool = False,
# callback: Callable[[float, int, int], None] = None,
# scheduler: str = "WarmupLinear",
# weight_decay: float = 0.01,
# evaluation_steps: int = 0,
# save_best_model: bool = True,
# use_amp: bool = False, # disable this option for now
checkpoint_path: Optional[str] = None,
checkpoint_save_steps: int = 500,
# checkpoint_save_total_limit: int = 0,
) -> None:
"""Train model."""
model.to(device)
# TODO: hardcode loss now, make customizable later
loss_model = MyMultipleNegativesRankingLoss(model=model)
loss_model.to(device)
# prepare optimizer/scheduler
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters: List[Dict[str, Any]] = [
{
"params": [p for n, p in param_optimizer],
},
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = len(data_loader)
num_train_steps = int(steps_per_epoch * epochs)
scheduler_obj = transformers.get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_steps
)
if verbose:
print_text("> Prepared optimizer, scheduler, and loss model.\n", color="blue")
global_step = 0
data_iterator = iter(data_loader)
# if checkpoint_path is specified, create if doesn't exist
if checkpoint_path is not None:
Path(checkpoint_path).mkdir(parents=True, exist_ok=True)
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
loss_model.zero_grad()
loss_model.train()
for _ in trange(
steps_per_epoch,
desc="Iteration",
smoothing=0.05,
disable=not show_progress_bar,
):
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(data_loader)
data = next(data_iterator)
query, context = data
context = context.to(device)
query = query.to(device)
loss_value = loss_model(query, context)
if verbose:
print_text(
f"> [Epoch {epoch}] Current loss: {loss_value}\n", color="blue"
)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
scheduler_obj.step()
training_steps += 1
global_step += 1
# TODO: skip eval for now
if checkpoint_path is not None and global_step % checkpoint_save_steps == 0:
full_ck_path = Path(checkpoint_path) / f"step_{global_step}"
model.save(str(full_ck_path))
if verbose:
print_text(f"> Finished training, saving to {output_path}\n", color="blue")
# save model
model.save(output_path)
| MyMultipleNegativesRankingLoss |
python | django__django | tests/model_fields/models.py | {
"start": 18757,
"end": 19126
} | class ____(models.Model):
name = models.CharField(max_length=10, null=True)
lower_name = models.GeneratedField(
expression=Lower("name"),
output_field=models.CharField(max_length=10),
db_persist=False,
null=True,
)
class Meta:
required_db_features = {"supports_virtual_generated_columns"}
| GeneratedModelNullVirtual |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 42468,
"end": 44192
} | class ____(Elemwise):
_parameters = ["frame", "columns"]
@functools.cached_property
def unique_partition_mapping_columns_from_shuffle(self):
result = set()
columns = self.operand("columns")
for elem in self.frame.unique_partition_mapping_columns_from_shuffle:
if isinstance(elem, tuple):
subset = self.frame._meta[list(elem)].rename(columns=columns)
result.add(tuple(subset.columns))
else:
# scalar
subset = self.frame._meta[[elem]]
result.add(subset.columns[0])
return result
@staticmethod
def operation(df, columns):
return df.rename(columns=columns)
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection) and isinstance(
self.operand("columns"), Mapping
):
reverse_mapping = {val: key for key, val in self.operand("columns").items()}
columns = determine_column_projection(self, parent, dependents)
columns = _convert_to_list(columns)
frame_columns = set(self.frame.columns)
columns = [
(
reverse_mapping[col]
if col in reverse_mapping and reverse_mapping[col] in frame_columns
else col
)
for col in columns
]
columns = [col for col in self.frame.columns if col in columns]
if columns == self.frame.columns:
return
return type(parent)(
type(self)(self.frame[columns], *self.operands[1:]),
*parent.operands[1:],
)
| RenameFrame |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 6873,
"end": 7265
} | class ____(_UnicodeFixture, fixtures.TablesTest):
__requires__ = "unicode_data", "text_type"
__backend__ = True
datatype = UnicodeText()
@requirements.empty_strings_text
def test_empty_strings_text(self, connection):
self._test_empty_strings(connection)
def test_null_strings_text(self, connection):
self._test_null_strings(connection)
| UnicodeTextTest |
python | apache__airflow | airflow-core/src/airflow/models/taskmap.py | {
"start": 1950,
"end": 10856
} | class ____(TaskInstanceDependencies):
"""
Model to track dynamic task-mapping information.
This is currently only populated by an upstream TaskInstance pushing an
XCom that's pulled by a downstream for mapping purposes.
"""
__tablename__ = "task_map"
# Link to upstream TaskInstance creating this dynamic mapping information.
dag_id: Mapped[str] = mapped_column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
task_id: Mapped[str] = mapped_column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
run_id: Mapped[str] = mapped_column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
map_index: Mapped[int] = mapped_column(Integer, primary_key=True)
length: Mapped[int] = mapped_column(Integer, nullable=False)
keys: Mapped[list | None] = mapped_column(ExtendedJSON, nullable=True)
__table_args__ = (
CheckConstraint(length >= 0, name="task_map_length_not_negative"),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_map_task_instance_fkey",
ondelete="CASCADE",
onupdate="CASCADE",
),
)
def __init__(
self,
dag_id: str,
task_id: str,
run_id: str,
map_index: int,
length: int,
keys: list[Any] | None,
) -> None:
self.dag_id = dag_id
self.task_id = task_id
self.run_id = run_id
self.map_index = map_index
self.length = length
self.keys = keys
@classmethod
def from_task_instance_xcom(cls, ti: TaskInstance, value: Collection) -> TaskMap:
if ti.run_id is None:
raise ValueError("cannot record task map for unrun task instance")
return cls(
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=ti.run_id,
map_index=ti.map_index,
length=len(value),
keys=(list(value) if isinstance(value, collections.abc.Mapping) else None),
)
@property
def variant(self) -> TaskMapVariant:
if self.keys is None:
return TaskMapVariant.LIST
return TaskMapVariant.DICT
@classmethod
def expand_mapped_task(
cls,
task: SerializedBaseOperator | MappedOperator,
run_id: str,
*,
session: Session,
) -> tuple[Sequence[TaskInstance], int]:
"""
Create the mapped task instances for mapped task.
:raise NotMapped: If this task does not need expansion.
:return: The newly created mapped task instances (if any) in ascending
order by map index, and the maximum map index value.
"""
from airflow.models.expandinput import NotFullyPopulated
from airflow.models.mappedoperator import MappedOperator, get_mapped_ti_count
from airflow.models.taskinstance import TaskInstance
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.settings import task_instance_mutation_hook
if not isinstance(task, (MappedOperator, SerializedBaseOperator)):
raise RuntimeError(
f"cannot expand unrecognized operator type {type(task).__module__}.{type(task).__name__}"
)
try:
total_length: int | None = get_mapped_ti_count(task, run_id, session=session)
except NotFullyPopulated as e:
if not task.dag or not task.dag.partial:
task.log.error(
"Cannot expand %r for run %s; missing upstream values: %s",
task,
run_id,
sorted(e.missing),
)
total_length = None
state: str | None = None
unmapped_ti: TaskInstance | None = session.scalars(
select(TaskInstance).where(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index == -1,
or_(TaskInstance.state.in_(State.unfinished), TaskInstance.state.is_(None)),
)
).one_or_none()
all_expanded_tis: list[TaskInstance] = []
if unmapped_ti:
if TYPE_CHECKING:
assert task.dag is None
# The unmapped task instance still exists and is unfinished, i.e. we
# haven't tried to run it before.
if total_length is None:
# If the DAG is partial, it's likely that the upstream tasks
# are not done yet, so the task can't fail yet.
if not task.dag or not task.dag.partial:
unmapped_ti.state = TaskInstanceState.UPSTREAM_FAILED
elif total_length < 1:
# If the upstream maps this to a zero-length value, simply mark
# the unmapped task instance as SKIPPED (if needed).
task.log.info(
"Marking %s as SKIPPED since the map has %d values to expand",
unmapped_ti,
total_length,
)
unmapped_ti.state = TaskInstanceState.SKIPPED
else:
zero_index_ti_exists = exists_query(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index == 0,
session=session,
)
if not zero_index_ti_exists:
# Otherwise convert this into the first mapped index, and create
# TaskInstance for other indexes.
unmapped_ti.map_index = 0
task.log.debug("Updated in place to become %s", unmapped_ti)
all_expanded_tis.append(unmapped_ti)
# execute hook for task instance map index 0
task_instance_mutation_hook(unmapped_ti)
session.flush()
else:
task.log.debug("Deleting the original task instance: %s", unmapped_ti)
session.delete(unmapped_ti)
state = unmapped_ti.state
dag_version_id = unmapped_ti.dag_version_id
if total_length is None or total_length < 1:
# Nothing to fixup.
indexes_to_map: Iterable[int] = ()
else:
# Only create "missing" ones.
current_max_mapping = (
session.scalar(
select(func.max(TaskInstance.map_index)).where(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.run_id == run_id,
)
)
or 0
)
indexes_to_map = range(current_max_mapping + 1, total_length)
if unmapped_ti:
dag_version_id = unmapped_ti.dag_version_id
elif dag_version := DagVersion.get_latest_version(task.dag_id, session=session):
dag_version_id = dag_version.id
else:
dag_version_id = None
for index in indexes_to_map:
# TODO: Make more efficient with bulk_insert_mappings/bulk_save_mappings.
ti = TaskInstance(
task,
run_id=run_id,
map_index=index,
state=state,
dag_version_id=dag_version_id,
)
task.log.debug("Expanding TIs upserted %s", ti)
task_instance_mutation_hook(ti)
ti = session.merge(ti)
ti.refresh_from_task(task) # session.merge() loses task information.
all_expanded_tis.append(ti)
# Coerce the None case to 0 -- these two are almost treated identically,
# except the unmapped ti (if exists) is marked to different states.
total_expanded_ti_count = total_length or 0
# Any (old) task instances with inapplicable indexes (>= the total
# number we need) are set to "REMOVED".
query = select(TaskInstance).where(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index >= total_expanded_ti_count,
)
to_update = session.scalars(with_row_locks(query, of=TaskInstance, session=session, skip_locked=True))
for ti in to_update:
ti.state = TaskInstanceState.REMOVED
session.flush()
return all_expanded_tis, total_expanded_ti_count - 1
| TaskMap |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 2398,
"end": 11204
} | class ____(TestCase):
def setUp(self):
User.objects.create_user('disallowed', 'disallowed@example.com', 'password')
user = User.objects.create_user('permitted', 'permitted@example.com', 'password')
user.user_permissions.set([
Permission.objects.get(codename='add_basicmodel'),
Permission.objects.get(codename='change_basicmodel'),
Permission.objects.get(codename='delete_basicmodel')
])
user = User.objects.create_user('updateonly', 'updateonly@example.com', 'password')
user.user_permissions.set([
Permission.objects.get(codename='change_basicmodel'),
])
self.permitted_credentials = basic_auth_header('permitted', 'password')
self.disallowed_credentials = basic_auth_header('disallowed', 'password')
self.updateonly_credentials = basic_auth_header('updateonly', 'password')
BasicModel(text='foo').save()
def test_has_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_api_root_view_discard_default_django_model_permission(self):
"""
We check that DEFAULT_PERMISSION_CLASSES can
apply to APIRoot view. More specifically we check expected behavior of
``_ignore_model_permissions`` attribute support.
"""
request = factory.get('/', format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
request.resolver_match = ResolverMatch('get', (), {})
response = api_root_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_ignore_model_permissions_with_unauthenticated_user(self):
"""
We check that the ``_ignore_model_permissions`` attribute
doesn't ignore the authentication.
"""
request = factory.get('/', format='json')
request.resolver_match = ResolverMatch('get', (), {})
response = ignored_get_queryset_list_view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_ignore_model_permissions_with_authenticated_user(self):
"""
We check that the ``_ignore_model_permissions`` attribute
with an authenticated user.
"""
request = factory.get('/', format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
request.resolver_match = ResolverMatch('get', (), {})
response = ignored_get_queryset_list_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_queryset_has_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = get_queryset_list_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_has_put_permissions(self):
request = factory.put('/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_has_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_does_not_have_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_put_permissions(self):
request = factory.put('/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_options_permitted(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.permitted_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions']), ['POST'])
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.permitted_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions']), ['PUT'])
def test_options_disallowed(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.disallowed_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.disallowed_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
def test_options_updateonly(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.updateonly_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.updateonly_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions']), ['PUT'])
def test_empty_view_does_not_assert(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = empty_list_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_calling_method_not_allowed(self):
request = factory.generic('METHOD_NOT_ALLOWED', '/', HTTP_AUTHORIZATION=self.permitted_credentials)
response = root_view(request)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
request = factory.generic('METHOD_NOT_ALLOWED', '/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_check_auth_before_queryset_call(self):
class View(RootView):
def get_queryset(_):
self.fail('should not reach due to auth check')
view = View.as_view()
request = factory.get('/', HTTP_AUTHORIZATION='')
response = view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_queryset_assertions(self):
class View(views.APIView):
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
view = View.as_view()
request = factory.get('/', HTTP_AUTHORIZATION=self.permitted_credentials)
msg = 'Cannot apply DjangoModelPermissions on a view that does not set `.queryset` or have a `.get_queryset()` method.'
with self.assertRaisesMessage(AssertionError, msg):
view(request)
# Faulty `get_queryset()` methods should trigger the above "view does not have a queryset" assertion.
class View(RootView):
def get_queryset(self):
return None
view = View.as_view()
request = factory.get('/', HTTP_AUTHORIZATION=self.permitted_credentials)
with self.assertRaisesMessage(AssertionError, 'View.get_queryset() returned None'):
view(request)
| ModelPermissionsIntegrationTests |
python | scipy__scipy | scipy/io/matlab/tests/test_streams.py | {
"start": 2258,
"end": 7543
} | class ____:
def _get_data(self, size):
data = random.randbytes(size)
compressed_data = zlib.compress(data)
stream = BytesIO(compressed_data)
return stream, len(compressed_data), data
def test_read(self):
SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1,
BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1]
READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1,
BLOCK_SIZE, BLOCK_SIZE+1]
def check(size, read_size):
compressed_stream, compressed_data_len, data = self._get_data(size)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
data2 = b''
so_far = 0
while True:
block = stream.read(min(read_size,
size - so_far))
if not block:
break
so_far += len(block)
data2 += block
assert_equal(data, data2)
for size in SIZES:
for read_size in READ_SIZES:
check(size, read_size)
def test_read_max_length(self):
data = random.randbytes(1234)
compressed_data = zlib.compress(data)
compressed_stream = BytesIO(compressed_data + b"abbacaca")
stream = ZlibInputStream(compressed_stream, len(compressed_data))
stream.read(len(data))
assert_equal(compressed_stream.tell(), len(compressed_data))
assert_raises(OSError, stream.read, 1)
def test_read_bad_checksum(self):
data = random.randbytes(10)
compressed_data = zlib.compress(data)
# break checksum
compressed_data = (compressed_data[:-1]
+ bytes([(compressed_data[-1] + 1) & 255]))
compressed_stream = BytesIO(compressed_data)
stream = ZlibInputStream(compressed_stream, len(compressed_data))
assert_raises(zlib.error, stream.read, len(data))
def test_seek(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
stream.seek(123)
p = 123
assert_equal(stream.tell(), p)
d1 = stream.read(11)
assert_equal(d1, data[p:p+11])
stream.seek(321, 1)
p = 123+11+321
assert_equal(stream.tell(), p)
d2 = stream.read(21)
assert_equal(d2, data[p:p+21])
stream.seek(641, 0)
p = 641
assert_equal(stream.tell(), p)
d3 = stream.read(11)
assert_equal(d3, data[p:p+11])
assert_raises(OSError, stream.seek, 10, 2)
assert_raises(OSError, stream.seek, -1, 1)
assert_raises(ValueError, stream.seek, 1, 123)
stream.seek(10000, 1)
assert_raises(OSError, stream.read, 12)
def test_seek_bad_checksum(self):
data = random.randbytes(10)
compressed_data = zlib.compress(data)
# break checksum
compressed_data = (compressed_data[:-1]
+ bytes([(compressed_data[-1] + 1) & 255]))
compressed_stream = BytesIO(compressed_data)
stream = ZlibInputStream(compressed_stream, len(compressed_data))
assert_raises(zlib.error, stream.seek, len(data))
def test_all_data_read(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
assert_(not stream.all_data_read())
stream.seek(512)
assert_(not stream.all_data_read())
stream.seek(1024)
assert_(stream.all_data_read())
@pytest.mark.skipif(
(platform.system() == 'Windows' and sys.version_info >= (3, 14)),
reason='gh-23185')
def test_all_data_read_overlap(self):
COMPRESSION_LEVEL = 6
data = np.arange(33707000, dtype=np.uint8)
compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
compressed_data_len = len(compressed_data)
# check that part of the checksum overlaps
assert_(compressed_data_len == BLOCK_SIZE + 2)
compressed_stream = BytesIO(compressed_data)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
assert_(not stream.all_data_read())
stream.seek(len(data))
assert_(stream.all_data_read())
@pytest.mark.skipif(
(platform.system() == 'Windows' and sys.version_info >= (3, 14)),
reason='gh-23185')
def test_all_data_read_bad_checksum(self):
COMPRESSION_LEVEL = 6
data = np.arange(33707000, dtype=np.uint8)
compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
compressed_data_len = len(compressed_data)
# check that part of the checksum overlaps
assert_(compressed_data_len == BLOCK_SIZE + 2)
# break checksum
compressed_data = (compressed_data[:-1]
+ bytes([(compressed_data[-1] + 1) & 255]))
compressed_stream = BytesIO(compressed_data)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
assert_(not stream.all_data_read())
stream.seek(len(data))
assert_raises(zlib.error, stream.all_data_read)
| TestZlibInputStream |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/record/__init__.py | {
"start": 16822,
"end": 17407
} | class ____(ABC):
"""Mixin to ease migration by adding NamedTuple utility methods.
Inherit when converting an existing NamedTuple that has callsites to _replace / _asdict, ie.
@record
def AssetSubset(LegacyNamedTupleMixin):
asset_key: AssetKey
value: Union[bool, PartitionsSubset]
"""
def _replace(self, **kwargs) -> Self:
return replace(self, **kwargs)
def _asdict(self) -> Mapping[str, Any]:
return as_dict(self)
def __iter__(self) -> Iterator:
return tuple.__iter__(self) # type: ignore
| LegacyNamedTupleMixin |
python | modin-project__modin | modin/tests/pandas/extensions/test_series_extensions.py | {
"start": 4689,
"end": 5808
} | class ____:
"""
Make sure to test that we override special "dunder" methods like __len__
correctly. python calls these methods with DataFrame.__len__(obj)
rather than getattr(obj, "__len__")().
source: https://docs.python.org/3/reference/datamodel.html#special-lookup
"""
def test_len(self, Backend1):
@register_series_accessor(name="__len__", backend=Backend1)
def always_get_1(self):
return 1
series = pd.Series([1, 2, 3])
assert len(series) == 3
backend_series = series.set_backend(Backend1)
assert len(backend_series) == 1
assert backend_series.__len__() == 1
def test_repr(self, Backend1):
@register_series_accessor(name="__repr__", backend=Backend1)
def simple_repr(self) -> str:
return "series_string"
series = pd.Series([1, 2, 3])
assert repr(series) == repr(series.modin.to_pandas())
backend_series = series.set_backend(Backend1)
assert repr(backend_series) == "series_string"
assert backend_series.__repr__() == "series_string"
| TestDunders |
python | ansible__ansible | test/units/_internal/templating/test_template_utilities.py | {
"start": 1104,
"end": 2695
} | class ____(unittest.TestCase):
test_data = (
# Test backslashes in a filter arg are double escaped
dict(
template=u"{{ 'test2 %s' | format('\\1') }}",
expectation=u"test2 \\1",
args=dict()
),
# Test backslashes inside the jinja2 var itself are double
# escaped
dict(
template=u"Test 2\\3: {{ '\\1 %s' | format('\\2') }}",
expectation=u"Test 2\\3: \\1 \\2",
args=dict()
),
# Test backslashes outside of the jinja2 var are not double
# escaped
dict(
template=u"Test 2\\3: {{ 'test2 %s' | format('\\1') }}; \\done",
expectation=u"Test 2\\3: test2 \\1; \\done",
args=dict()
),
# Test backslashes in a variable sent to a filter are handled
dict(
template=u"{{ 'test2 %s' | format(var1) }}",
expectation=u"test2 \\1",
args=dict(var1=u'\\1')
),
# Test backslashes in a variable expanded by jinja2 are double
# escaped
dict(
template=u"Test 2\\3: {{ var1 | format('\\2') }}",
expectation=u"Test 2\\3: \\1 \\2",
args=dict(var1=u'\\1 %s')
),
)
def setUp(self):
self.env = AnsibleEnvironment()
def test_backslash_escaping(self):
for test in self.test_data:
templar = TemplateEngine(None, test['args'])
self.assertEqual(templar.template(TrustedAsTemplate().tag(test['template'])), test['expectation'])
| TestBackslashEscape |
python | sympy__sympy | sympy/polys/polytools.py | {
"start": 124933,
"end": 211950
} | class ____(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super().__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
{y}
"""
return self.free_symbols_in_domain
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("Cannot unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("Cannot unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("Cannot unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
rep, opt = _dict_from_expr(expr, opt)
if not opt.gens:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
reps, opt = _parallel_dict_from_expr(exprs, opt)
if not opt.gens:
raise PolificationFailed(opt, origs, exprs, True)
from sympy.functions.elementary.piecewise import Piecewise
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
@public
def degree(f, gen=0):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
See also
========
sympy.polys.polytools.Poly.total_degree
degree_list
"""
f = sympify(f, strict=True)
gen_is_Num = sympify(gen, strict=True).is_Number
if f.is_Poly:
p = f
isNum = p.as_expr().is_Number
else:
isNum = f.is_Number
if not isNum:
if gen_is_Num:
p, _ = poly_from_expr(f)
else:
p, _ = poly_from_expr(f, gen)
if isNum:
return S.Zero if f else S.NegativeInfinity
if not gen_is_Num:
if f.is_Poly and gen not in p.gens:
# try recast without explicit gens
p, _ = poly_from_expr(f.as_expr())
if gen not in p.gens:
return S.Zero
elif not f.is_Poly and len(f.free_symbols) > 1:
raise TypeError(filldedent('''
A symbolic generator of interest is required for a multivariate
expression like func = %s, e.g. degree(func, gen = %s) instead of
degree(func, gen = %s).
''' % (f, next(ordered(f.free_symbols)), gen)))
result = p.degree(gen)
return Integer(result) if isinstance(result, int) else S.NegativeInfinity
@public
def total_degree(f, *gens):
"""
Return the total_degree of ``f`` in the given variables.
Examples
========
>>> from sympy import total_degree, Poly
>>> from sympy.abc import x, y
>>> total_degree(1)
0
>>> total_degree(x + x*y)
2
>>> total_degree(x + x*y, x)
1
If the expression is a Poly and no variables are given
then the generators of the Poly will be used:
>>> p = Poly(x + x*y, y)
>>> total_degree(p)
1
To deal with the underlying expression of the Poly, convert
it to an Expr:
>>> total_degree(p.as_expr())
2
This is done automatically if any variables are given:
>>> total_degree(p, x)
1
See also
========
degree
"""
p = sympify(f)
if p.is_Poly:
p = p.as_expr()
if p.is_Number:
rv = 0
else:
if f.is_Poly:
gens = gens or f.gens
rv = Poly(p, gens).total_degree()
return Integer(rv)
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(3/5 - x/5, x + 1)
See Also
========
sympy.polys.polytools.gcdex:
Extended Euclidean algorithm.
sympy.polys.polytools.gcdex_steps:
Intermediate steps of the Extended Euclidean algorithm.
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
def _gcdex_steps_domain(a, b, K):
"""
Generator for intermediate steps in the extended euclidean algorithm
on domain elements. Helper function for `gcdex_steps`.
"""
if not K.is_PID:
raise DomainError("gcdex_steps is only for Euclidean domains")
s1, s2 = K.one, K.zero
t1, t2 = K.zero, K.one
while b:
yield s2, t2, b
quotient, remainder = K.div(a, b)
a, b = b, remainder
s1, s2 = s2, s1 - quotient * s2
t1, t2 = t2, t1 - quotient * t2
def _gcdex_steps_polynomial(f, g, auto):
"""
Generator for intermediate steps of the extended euclidean algorithm
on polynomials. Helper function for `gcdex_steps`.
"""
if auto and f.domain.is_Ring:
f, g = f.to_field(), g.to_field()
if not f.domain.is_Field:
raise DomainError("gcdex_steps is only for Euclidean domains")
if not f.is_univariate:
raise ValueError('univariate polynomial expected')
s1, s2 = f.one, f.zero
t1, t2 = f.zero, f.one
r1, r2 = f, g
if f.degree() < g.degree():
s1, t1 = t1, s1
s2, t2 = t2, s2
r1, r2 = r2, r1
for _ in range(max(f.degree(), g.degree()) + 1):
yield s2, t2, r2
quotient, remainder = divmod(r1, r2)
if remainder == 0:
break
r1, r2 = r2, remainder
s1, s2 = s2, s1 - quotient * s2
t1, t2 = t2, t1 - quotient * t2
@overload
def gcdex_steps(
f: Expr, g: Expr, *gens: Expr, polys: Literal[False] = False, **args: Any
) -> Iterator[tuple[Expr, Expr, Expr]]:
...
@overload
def gcdex_steps(
f: Expr, g: Expr, *gens: Expr, polys: Literal[True], **args: Any
) -> Iterator[tuple[Poly, Poly, Poly]]:
...
@overload
def gcdex_steps(
f: Poly, g: Poly, *gens: Expr, **args: Any
) -> Iterator[tuple[Poly, Poly, Poly]]:
...
@public
def gcdex_steps(
f: Expr | Poly, g: Expr | Poly, *gens: Expr, **args: Any
) -> Iterator[tuple[Expr, Expr, Expr]] | Iterator[tuple[Poly, Poly, Poly]]:
"""
Generator for intermediate steps in the extended Euclidean algorithm.
Description
===========
Returns a generator to three polynomial sequences `s`, `t`, and `r` that
enumerate all solutions apart from the trivial `(s, t, r) = (1, 0, f)`,
and `(g, -f, 0)` (up to multiplicative constants) to the following
conditions::
f*s[i] + g*t[i] = r[i],
r[i].deg() > r[i + 1].deg()
In particular, the final value of `r = gcd(f, g)`, the greatest common
divisor of `f` and `g`.
The sequences `s`, `t`, and `r` also have the following properties (see
ref. [1] McEliece and Shearer)::
t[i]*r[i-1] - t[i-1]*r[i] = (-1)**i*f
s[i]*r[i-1] - s[i-1]*r[i] = (-1)**(i+1)*g
s[i]*t[i-1]- s[i-1]*t[i] = (-1)**(i+1)
s[i].degree() + r[i-1].degree() = b.degree()
t[i].degree() + r[i-1].degree() = a.degree()
Parameters
==========
f : Poly
The first polynomial.
g : Poly
The second polynomial,
Returns
=======
steps : Iterator[tuple[Poly, Poly, Poly]] | Iterator[tuple[Expr, Expr, Expr]]
A generator to the sequences `s`, `t`, and `r`
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import simplify
>>> from sympy.polys.polytools import gcdex_steps
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> for step in gcdex_steps(f, g): print(step)
(0, 1, x**3 + x**2 - 4*x - 4)
(1, 3 - x, x**2 + 4*x + 3)
(3 - x, x**2 - 6*x + 10, 5*x + 5)
Each step `(s, t, r)` satisfies `f*s + g*t = r`
>>> for s, t, r in gcdex_steps(f, g): print(simplify(f*s + g*t - r))
0
0
0
The final output of `gcdex_steps(f, g)` is equivalent to `gcdex(f, g)`
>>> from sympy.polys.polytools import gcdex
>>> gcdex(f, g)
(3/5 - x/5, x**2/5 - 6*x/5 + 2, x + 1)
For multivariate polynomials, the variable must be specified. This example
treats the polynomials as univariate polynomials over `x`
>>> f = x**2*y - 2*x*y**2 + 1
>>> g = x + 2*y
>>> for step in gcdex_steps(f, g, gens=x): print(step)
(0, 1, x + 2*y)
(1, -x*y + 4*y**2, 8*y**3 + 1)
This example treats the same polynomials as univariate polynomials over `y`
>>> for step in gcdex_steps(f, g, gens=y): print(step)
(0, 1, x + 2*y)
(1, -x**2 + x*y, 1 - x**3)
See Also
========
sympy.polys.polytools.gcdex:
Extended Euclidean algorithm without intermediate steps.
sympy.polys.polytools.half_gcdex:
Half extended Euclidean algorithm.
References
==========
.. [1] McEliece, R. J., & Shearer, J. B. (1978). A Property of Euclid's
Algorithm and an application to Pade Approximation. SIAM Journal on
Applied Mathematics, 34(4), 611-615. doi:10.1137/0134048
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
for s, t, r in _gcdex_steps_domain(a, b, domain):
yield domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(r)
return
except DomainError as exc:
raise ComputationFailed('gcdex_steps', 2, exc)
for s, t, r in _gcdex_steps_polynomial(F, G, auto=opt.auto):
if opt.polys:
yield s, t, r
else:
yield s.as_expr(), t.as_expr(), r.as_expr()
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(3/5 - x/5, x**2/5 - 6*x/5 + 2, x + 1)
See also
========
sympy.polys.polytools.half_gcdex:
Half extended Euclidean algorithm.
sympy.polys.polytools.gcdex_steps:
Intermediate steps of the extended Euclidean algorithm.
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert, S, mod_inverse
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
For more efficient inversion of Rationals,
use the :obj:`sympy.core.intfunc.mod_inverse` function:
>>> mod_inverse(3, 5)
2
>>> (S(2)/5).invert(S(7)/3)
5/2
See Also
========
sympy.core.intfunc.mod_inverse
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, includePRS=False, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
def try_non_polynomial_gcd(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
return None
result = try_non_polynomial_gcd(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
# gcd for domain Q[irrational] (purely algebraic irrational)
if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq):
a = seq[-1]
lst = [ (a/elt).ratsimp() for elt in seq[:-1] ]
if all(frc.is_rational for frc in lst):
lc = 1
for frc in lst:
lc = lcm(lc, frc.as_numer_denom()[0])
# abs ensures that the gcd is always non-negative
return abs(a/lc)
except PolificationFailed as exc:
result = try_non_polynomial_gcd(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
# gcd for domain Q[irrational] (purely algebraic irrational)
a, b = map(sympify, (f, g))
if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational:
frc = (a/b).ratsimp()
if frc.is_rational:
# abs ensures that the returned gcd is always non-negative
return abs(a/frc.as_numer_denom()[0])
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
def try_non_polynomial_lcm(seq) -> Optional[Expr]:
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.to_sympy(domain.one)
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
return None
result = try_non_polynomial_lcm(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
# lcm for domain Q[irrational] (purely algebraic irrational)
if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq):
a = seq[-1]
lst = [ (a/elt).ratsimp() for elt in seq[:-1] ]
if all(frc.is_rational for frc in lst):
lc = 1
for frc in lst:
lc = lcm(lc, frc.as_numer_denom()[1])
return a*lc
except PolificationFailed as exc:
result = try_non_polynomial_lcm(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
# lcm for domain Q[irrational] (purely algebraic irrational)
a, b = map(sympify, (f, g))
if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational:
frc = (a/b).ratsimp()
if frc.is_rational:
return a*frc.as_numer_denom()[1]
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring when all coefficients are not fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
orig = sympify(f)
if isinstance(f, Equality):
return Equality(*(terms_gcd(s, *gens, **args) for s in [f.lhs, f.rhs]))
elif isinstance(f, Relational):
raise TypeError("Inequalities cannot be used with terms_gcd. Found: %s" %(f,))
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
if opt.domain.is_Ring:
if opt.domain.is_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.is_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if equal_valued(coeff, 1):
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
    """
    Reduce ``f`` modulo a constant ``p``.

    Examples
    ========

    >>> from sympy import trunc
    >>> from sympy.abc import x

    >>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
    -x**3 - x + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('trunc', 1, exc)

    truncated = poly.trunc(sympify(p))

    # Hand back a Poly only when the caller asked for one via ``polys=True``.
    return truncated if opt.polys else truncated.as_expr()
@public
def monic(f, *gens, **args):
    """
    Divide all coefficients of ``f`` by ``LC(f)``.

    Examples
    ========

    >>> from sympy import monic
    >>> from sympy.abc import x

    >>> monic(3*x**2 + 4*x + 2)
    x**2 + 4*x/3 + 2/3

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('monic', 1, exc)

    normalized = poly.monic(auto=opt.auto)

    # Return a plain expression unless Poly output was requested.
    return normalized if opt.polys else normalized.as_expr()
@public
def content(f, *gens, **args):
    """
    Compute GCD of coefficients of ``f``.

    Examples
    ========

    >>> from sympy import content
    >>> from sympy.abc import x

    >>> content(6*x**2 + 8*x + 12)
    2

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, _ = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('content', 1, exc)

    # The content is a ground-domain element, so there is no
    # Poly-versus-expression switch to make here.
    return poly.content()
@public
def primitive(f, *gens, **args):
    """
    Compute content and the primitive form of ``f``.

    Examples
    ========

    >>> from sympy.polys.polytools import primitive
    >>> from sympy.abc import x

    >>> primitive(6*x**2 + 8*x + 12)
    (2, 3*x**2 + 4*x + 6)

    >>> eq = (2 + 2*x)*x + 2

    Expansion is performed by default:

    >>> primitive(eq)
    (2, x**2 + x + 1)

    Set ``expand`` to False to shut this off. Note that the
    extraction will not be recursive; use the as_content_primitive method
    for recursive, non-destructive Rational extraction.

    >>> primitive(eq, expand=False)
    (1, x*(2*x + 2) + 2)

    >>> eq.as_content_primitive()
    (2, x*(x + 1) + 1)

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('primitive', 1, exc)

    cont, prim = poly.primitive()

    # Pair the content with either a Poly or a plain expression.
    return (cont, prim) if opt.polys else (cont, prim.as_expr())
@public
def compose(f, g, *gens, **args):
    """
    Compute functional composition ``f(g)``.

    Examples
    ========

    >>> from sympy import compose
    >>> from sympy.abc import x

    >>> compose(x**2 + x, x - 1)
    x**2 - x

    """
    options.allowed_flags(args, ['polys'])

    try:
        # Convert both inputs over a common set of generators.
        (pf, pg), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('compose', 2, exc)

    composed = pf.compose(pg)

    return composed if opt.polys else composed.as_expr()
@public
def decompose(f, *gens, **args):
    """
    Compute functional decomposition of ``f``.

    Examples
    ========

    >>> from sympy import decompose
    >>> from sympy.abc import x

    >>> decompose(x**4 + 2*x**3 - x - 1)
    [x**2 - x - 1, x**2 + x]

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('decompose', 1, exc)

    components = poly.decompose()

    if opt.polys:
        return components

    return [component.as_expr() for component in components]
@public
def sturm(f, *gens, **args):
    """
    Compute Sturm sequence of ``f``.

    Examples
    ========

    >>> from sympy import sturm
    >>> from sympy.abc import x

    >>> sturm(x**3 - 2*x**2 + x - 3)
    [x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('sturm', 1, exc)

    sequence = poly.sturm(auto=opt.auto)

    if opt.polys:
        return sequence

    return [entry.as_expr() for entry in sequence]
@public
def gff_list(f, *gens, **args):
    """
    Compute a list of greatest factorial factors of ``f``.

    Note that the input to ff() and rf() should be Poly instances to use the
    definitions here.

    Examples
    ========

    >>> from sympy import gff_list, ff, Poly
    >>> from sympy.abc import x

    >>> f = Poly(x**5 + 2*x**4 - x**3 - 2*x**2, x)

    >>> gff_list(f)
    [(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]

    >>> (ff(Poly(x), 1)*ff(Poly(x + 2), 4)) == f
    True

    >>> f = Poly(x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
        1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x, x)

    >>> gff_list(f)
    [(Poly(x**3 + 7, x, domain='ZZ'), 2), (Poly(x**2 + 5*x, x, domain='ZZ'), 3)]

    >>> ff(Poly(x**3 + 7, x), 2)*ff(Poly(x**2 + 5*x, x), 3) == f
    True

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('gff_list', 1, exc)

    pairs = poly.gff_list()

    if opt.polys:
        return pairs

    return [(g.as_expr(), k) for g, k in pairs]
@public
def gff(f, *gens, **args):
    """Compute greatest factorial factorization of ``f``. """
    # Only the Poly-level algorithm exists (see gff_list); a symbolic
    # falling-factorial front end has not been implemented.
    raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
    """
    Compute square-free norm of ``f``.

    Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the algebraic extension of the ground domain.

    Examples
    ========

    >>> from sympy import sqf_norm, sqrt
    >>> from sympy.abc import x

    >>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
    ([1], x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('sqf_norm', 1, exc)

    shifts, g, norm = poly.sqf_norm()

    # Lift the ground-domain shift amounts to SymPy Integers.
    shift_exprs = [Integer(s) for s in shifts]

    if opt.polys:
        return shift_exprs, g, norm

    return shift_exprs, g.as_expr(), norm.as_expr()
@public
def sqf_part(f, *gens, **args):
    """
    Compute square-free part of ``f``.

    Examples
    ========

    >>> from sympy import sqf_part
    >>> from sympy.abc import x

    >>> sqf_part(x**3 - 3*x - 2)
    x**2 - x - 2

    """
    options.allowed_flags(args, ['polys'])

    try:
        poly, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:
        raise ComputationFailed('sqf_part', 1, exc)

    part = poly.sqf_part()

    return part if opt.polys else part.as_expr()
def _poly_sort_key(poly):
    """Key ordering polys by size of rep, number of gens, domain, then rep."""
    coeff_list = poly.rep.to_list()
    return len(coeff_list), len(poly.gens), str(poly.domain), coeff_list
def _sorted_factors(factors, method):
    """Sort ``(poly, exp)`` pairs into a canonical order.

    For square-free factorizations (``method == 'sqf'``) the multiplicity is
    the primary sort key; otherwise factors are ordered by size first and
    multiplicity later.
    """
    multiplicity_first = (method == 'sqf')

    def key(pair):
        poly, exp = pair
        rep = poly.rep.to_list()
        if multiplicity_first:
            return (exp, len(rep), len(poly.gens), str(poly.domain), rep)
        return (len(rep), len(poly.gens), exp, str(poly.domain), rep)

    return sorted(factors, key=key)
def _factors_product(factors):
    """Multiply ``(poly, exp)`` pairs back into a single expression."""
    return Mul(*(base.as_expr()**exp for base, exp in factors))
def _symbolic_factor_list(expr, opt, method):
    """Helper function for :func:`_symbolic_factor`.

    Split ``expr`` (treated as a product) into a numeric coefficient and a
    list of ``(base, exp)`` factor pairs, factoring each polynomial base with
    the Poly method named by ``method`` (e.g. ``'factor'`` or ``'sqf'``).
    """
    coeff, factors = S.One, []
    # Let each multiplicand factor itself first, if it knows how.
    args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
        for i in Mul.make_args(expr)]
    for arg in args:
        if arg.is_Number or (isinstance(arg, Expr) and pure_complex(arg)):
            # Purely numeric factors are folded into the coefficient.
            coeff *= arg
            continue
        elif arg.is_Pow and arg.base != S.Exp1:
            base, exp = arg.args
            if base.is_Number and exp.is_Number:
                coeff *= arg
                continue
            if base.is_Number:
                # e.g. 2**x: numeric base with symbolic exponent stays a factor.
                factors.append((base, exp))
                continue
        else:
            base, exp = arg, S.One
        try:
            poly, _ = _poly_from_expr(base, opt)
        except PolificationFailed as exc:
            # Not a polynomial in the chosen gens: keep the factor untouched.
            factors.append((exc.expr, exp))
        else:
            func = getattr(poly, method + '_list')
            _coeff, _factors = func()
            if _coeff is not S.One:
                if exp.is_Integer:
                    coeff *= _coeff**exp
                elif _coeff.is_positive:
                    factors.append((_coeff, exp))
                else:
                    # A non-positive coefficient under a non-integer power
                    # cannot be pulled out; keep it as a factor of its own.
                    _factors.append((_coeff, S.One))
            if exp is S.One:
                factors.extend(_factors)
            elif exp.is_integer:
                factors.extend([(f, k*exp) for f, k in _factors])
            else:
                # Fractional/symbolic exponent: only provably positive bases
                # may distribute the exponent; the rest stay grouped.
                other = []
                for f, k in _factors:
                    if f.as_expr().is_positive:
                        factors.append((f, k*exp))
                    else:
                        other.append((f, k))
                factors.append((_factors_product(other), exp))
    if method == 'sqf':
        # Merge factors that share a multiplicity into one base per exponent.
        factors = [(reduce(mul, (f for f, _ in factors if _ == k)), k)
                   for k in {i for _, i in factors}]
    #collect duplicates
    rv = defaultdict(int)
    for k, v in factors:
        rv[k] += v
    return coeff, list(rv.items())
def _symbolic_factor(expr, opt, method):
    """Recursively factor ``expr``; helper for :func:`_generic_factor`."""
    if isinstance(expr, Expr):
        if hasattr(expr, '_eval_factor'):
            # The expression knows how to factor itself.
            return expr._eval_factor()
        combined = together(expr, fraction=opt['fraction'])
        coeff, factors = _symbolic_factor_list(combined, opt, method)
        return _keep_coeff(coeff, _factors_product(factors))
    if hasattr(expr, 'args'):
        # Non-Expr Basic (e.g. relational): recurse into the arguments.
        recursed = [_symbolic_factor(a, opt, method) for a in expr.args]
        return expr.func(*recursed)
    if hasattr(expr, '__iter__'):
        # Container: rebuild it from recursively factored elements.
        return expr.__class__([_symbolic_factor(a, opt, method) for a in expr])
    return expr
def _generic_factor_list(expr, gens, args, method):
    """Helper function for :func:`sqf_list` and :func:`factor_list`.

    Factor numerator and denominator of ``expr`` separately and return
    ``(coeff, fp)`` or, with ``frac=True``, ``(coeff, fp, fq)``.
    """
    options.allowed_flags(args, ['frac', 'polys'])
    opt = options.build_options(gens, args)
    expr = sympify(expr)
    if isinstance(expr, (Expr, Poly)):
        if isinstance(expr, Poly):
            numer, denom = expr, 1
        else:
            # Combine into a single fraction so numerator and denominator
            # can be factored independently.
            numer, denom = together(expr).as_numer_denom()
        cp, fp = _symbolic_factor_list(numer, opt, method)
        cq, fq = _symbolic_factor_list(denom, opt, method)
        # A non-trivial denominator is only allowed with ``frac=True``.
        if fq and not opt.frac:
            raise PolynomialError("a polynomial expected, got %s" % expr)
        _opt = opt.clone({"expand": True})
        # Symbolic factoring may leave plain Exprs; normalize them to Polys.
        for factors in (fp, fq):
            for i, (f, k) in enumerate(factors):
                if not f.is_Poly:
                    f, _ = _poly_from_expr(f, _opt)
                    factors[i] = (f, k)
        fp = _sorted_factors(fp, method)
        fq = _sorted_factors(fq, method)
        if not opt.polys:
            fp = [(f.as_expr(), k) for f, k in fp]
            fq = [(f.as_expr(), k) for f, k in fq]
        coeff = cp/cq
        if not opt.frac:
            return coeff, fp
        else:
            return coeff, fp, fq
    else:
        raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
    """Shared driver behind :func:`sqf` and :func:`factor`."""
    # ``fraction`` is consumed here; it is not a Poly option.
    combine_fractions = args.pop('fraction', True)
    options.allowed_flags(args, [])
    opt = options.build_options(gens, args)
    opt['fraction'] = combine_fractions
    return _symbolic_factor(sympify(expr), opt, method)
def to_rational_coeffs(f):
    """
    try to transform a polynomial to have rational coefficients

    try to find a transformation ``x = alpha*y``

    ``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
    rational coefficients, ``lc`` the leading coefficient.

    If this fails, try ``x = y + beta``
    ``f(x) = g(y)``

    Returns ``None`` if ``g`` not found;
    ``(lc, alpha, None, g)`` in case of rescaling
    ``(None, None, beta, g)`` in case of translation

    Notes
    =====

    Currently it transforms only polynomials without roots larger than 2.

    Examples
    ========

    >>> from sympy import sqrt, Poly, simplify
    >>> from sympy.polys.polytools import to_rational_coeffs
    >>> from sympy.abc import x
    >>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
    >>> lc, r, _, g = to_rational_coeffs(p)
    >>> lc, r
    (7 + 5*sqrt(2), 2 - 2*sqrt(2))
    >>> g
    Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
    >>> r1 = simplify(1/r)
    >>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
    True

    """
    from sympy.simplify.simplify import simplify

    def _try_rescale(f, f1=None):
        """
        try rescaling ``x -> alpha*x`` to convert f to a polynomial
        with rational coefficients.
        Returns ``alpha, f``; if the rescaling is successful,
        ``alpha`` is the rescaling factor, and ``f`` is the rescaled
        polynomial; else ``alpha`` is ``None``.
        """
        if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
            return None, f
        n = f.degree()
        lc = f.LC()
        # BUGFIX: fall back to the monic form of ``f`` itself; the previous
        # code read ``f1 or f1.monic()``, which raised AttributeError on
        # None whenever ``f1`` was not supplied by the caller.
        f1 = f1 or f.monic()
        coeffs = f1.all_coeffs()[1:]
        coeffs = [simplify(coeffx) for coeffx in coeffs]
        if len(coeffs) > 1 and coeffs[-2]:
            rescale1_x = simplify(coeffs[-2]/coeffs[-1])
            coeffs1 = []
            for i in range(len(coeffs)):
                coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
                if not coeffx.is_rational:
                    break
                coeffs1.append(coeffx)
            else:
                # All rescaled coefficients are rational: rebuild the
                # polynomial with those coefficients.
                rescale_x = simplify(1/rescale1_x)
                x = f.gens[0]
                v = [x**n]
                for i in range(1, n + 1):
                    v.append(coeffs1[i - 1]*x**(n - i))
                f = Add(*v)
                f = Poly(f)
                return lc, rescale_x, f
        return None

    def _try_translate(f, f1=None):
        """
        try translating ``x -> x + alpha`` to convert f to a polynomial
        with rational coefficients.
        Returns ``alpha, f``; if the translating is successful,
        ``alpha`` is the translating factor, and ``f`` is the shifted
        polynomial; else ``alpha`` is ``None``.
        """
        if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
            return None, f
        n = f.degree()
        # BUGFIX: same latent ``None.monic()`` dereference as in
        # ``_try_rescale``; default to the monic form of ``f``.
        f1 = f1 or f.monic()
        coeffs = f1.all_coeffs()[1:]
        c = simplify(coeffs[0])
        if c.is_Add and not c.is_rational:
            # Shift by the non-rational part of the subleading coefficient.
            rat, nonrat = sift(c.args,
                lambda z: z.is_rational is True, binary=True)
            alpha = -c.func(*nonrat)/n
            f2 = f1.shift(alpha)
            return alpha, f2
        return None

    def _has_square_roots(p):
        """
        Return True if ``f`` is a sum with square roots but no other root
        """
        coeffs = p.coeffs()
        has_sq = False
        for y in coeffs:
            for x in Add.make_args(y):
                f = Factors(x).factors
                r = [wx.q for b, wx in f.items() if
                    b.is_number and wx.is_Rational and wx.q >= 2]
                if not r:
                    continue
                if min(r) == 2:
                    has_sq = True
                if max(r) > 2:
                    # A root of order greater than 2 is present: give up.
                    return False
        return has_sq

    if f.get_domain().is_EX and _has_square_roots(f):
        f1 = f.monic()
        r = _try_rescale(f, f1)
        if r:
            return r[0], r[1], None, r[2]
        else:
            r = _try_translate(f, f1)
            if r:
                return None, None, r[0], r[1]
    return None
def _torational_factor_list(p, x):
    """
    helper function to factor polynomial using to_rational_coeffs

    Examples
    ========

    >>> from sympy.polys.polytools import _torational_factor_list
    >>> from sympy.abc import x
    >>> from sympy import sqrt, expand, Mul
    >>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
    >>> factors = _torational_factor_list(p, x); factors
    (-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
    >>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
    True
    >>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
    >>> factors = _torational_factor_list(p, x); factors
    (1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
    >>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
    True
    """
    from sympy.simplify.simplify import simplify
    poly = Poly(p, x, domain='EX')
    degree = poly.degree()
    transformed = to_rational_coeffs(poly)
    if not transformed:
        return None
    lc, scale, shift, g = transformed
    coeff, factor_pairs = factor_list(g.as_expr())
    if lc:
        # Rescaling ``x -> scale*x`` was applied: undo it in each factor.
        const = simplify(coeff*lc*scale**degree)
        inv_scale = simplify(1/scale)
        pairs = [(simplify(base.subs({x: x*inv_scale})), exp)
                 for base, exp in factor_pairs]
    else:
        # Translation ``x -> x + shift`` was applied: undo it in each factor.
        const = coeff
        pairs = [(base.subs({x: x - shift}), exp)
                 for base, exp in factor_pairs]
    return (const, pairs)
@public
def sqf_list(f, *gens, **args):
    """
    Compute a list of square-free factors of ``f``.

    Returns a pair ``(coeff, factors)`` where ``factors`` is a list of
    ``(base, multiplicity)`` pairs.

    Examples
    ========

    >>> from sympy import sqf_list
    >>> from sympy.abc import x

    >>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    (2, [(x + 1, 2), (x + 2, 3)])

    """
    return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
    """
    Compute square-free factorization of ``f``.

    The result is returned as a product of powers of square-free factors.

    Examples
    ========

    >>> from sympy import sqf
    >>> from sympy.abc import x

    >>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    2*(x + 1)**2*(x + 2)**3

    """
    return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
    """
    Compute a list of irreducible factors of ``f``.

    Returns a pair ``(coeff, factors)`` where ``factors`` is a list of
    ``(base, multiplicity)`` pairs.

    Examples
    ========

    >>> from sympy import factor_list
    >>> from sympy.abc import x, y

    >>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    (2, [(x + y, 1), (x**2 + 1, 2)])

    """
    return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, deep=False, **args):
    """
    Compute the factorization of expression, ``f``, into irreducibles. (To
    factor an integer into primes, use ``factorint``.)

    There are two modes implemented: symbolic and formal.  If ``f`` is not
    an instance of :class:`Poly` and no generators are specified, the
    expression tree is traversed and its components are factored without
    prior expansion (formal factorization is used when an :class:`~.Add`
    is encountered), which allows handling large or symbolic exponents.
    Otherwise the formal mode is used.

    By default the factorization is computed over the rationals.  To factor
    over other domains, e.g. an algebraic or finite field, use the
    ``extension``, ``modulus`` or ``domain`` options.

    Examples
    ========

    >>> from sympy import factor, sqrt, exp
    >>> from sympy.abc import x, y

    >>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    2*(x + y)*(x**2 + 1)**2

    >>> factor(x**2 + 1, modulus=2)
    (x + 1)**2
    >>> factor(x**2 + 1, gaussian=True)
    (x - I)*(x + I)

    >>> factor(x**2 - 2, extension=sqrt(2))
    (x - sqrt(2))*(x + sqrt(2))

    >>> factor((x**2 - 1)/(x**2 + 4*x + 4))
    (x - 1)*(x + 1)/(x + 2)**2
    >>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
    (x + 2)**20000000*(x**2 + 1)

    With ``deep=True`` subexpressions (e.g. exponents) are factored too:

    >>> factor(2**(x**2 + 2*x + 1), deep=True)
    2**((x + 1)**2)

    With ``fraction=False`` rational expressions are not combined
    (by default they are):

    >>> factor(5*x + 3*exp(2 - 7*x), deep=True)
    (5*x*exp(7*x) + 3*exp(2))*exp(-7*x)
    >>> factor(5*x + 3*exp(2 - 7*x), deep=True, fraction=False)
    5*x + 3*exp(2)*exp(-7*x)

    See Also
    ========
    sympy.ntheory.factor_.factorint

    """
    f = sympify(f)
    if deep:
        def _attempt(sub):
            # Keep a subexpression unchanged unless factoring made progress.
            res = factor(sub, *gens, **args)
            return res if res.is_Mul or res.is_Pow else sub

        f = bottom_up(f, _attempt)

        # Clean up subexpressions that may have been expanded while a
        # larger enclosing expression was being factored.
        replacements = {}
        for sub in f.atoms(Mul, Add):
            res = factor(sub, *gens, **args)
            if (res.is_Mul or res.is_Pow) and res != sub:
                replacements[sub] = res
        return f.xreplace(replacements)

    try:
        return _generic_factor(f, gens, args, method='factor')
    except PolynomialError:
        if f.is_commutative:
            raise
        # Fall back to the dedicated non-commutative factorizer.
        return factor_nc(f)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
    """
    Compute isolating intervals for roots of ``f``.

    ``F`` may be a single polynomial or a sequence of polynomials; in the
    latter case real roots of all of them are isolated simultaneously.

    Examples
    ========

    >>> from sympy import intervals
    >>> from sympy.abc import x

    >>> intervals(x**2 - 3)
    [((-2, -1), 1), ((1, 2), 1)]
    >>> intervals(x**2 - 3, eps=1e-2)
    [((-26/15, -19/11), 1), ((19/11, 26/15), 1)]

    """
    if hasattr(F, '__iter__'):
        # Sequence case: isolate real roots of all polynomials over QQ.
        polys, opt = parallel_poly_from_expr(F, domain='QQ')

        if len(opt.gens) > 1:
            raise MultivariatePolynomialError

        reps = [poly.rep.to_list() for poly in polys]

        if eps is not None:
            eps = opt.domain.convert(eps)

            if eps <= 0:
                raise ValueError("'eps' must be a positive rational")

        if inf is not None:
            inf = opt.domain.convert(inf)
        if sup is not None:
            sup = opt.domain.convert(sup)

        isolated = dup_isolate_real_roots_list(reps, opt.domain,
            eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)

        to_sympy = opt.domain.to_sympy
        return [((to_sympy(s), to_sympy(t)), indices)
                for (s, t), indices in isolated]

    try:
        poly = Poly(F)
    except GeneratorsNeeded:
        # A constant has no roots, hence no isolating intervals.
        return []

    return poly.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
    """
    Refine an isolating interval of a root to the given precision.

    Examples
    ========

    >>> from sympy import refine_root
    >>> from sympy.abc import x

    >>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
    (19/11, 26/15)

    """
    try:
        poly = Poly(f)
        if not isinstance(f, Poly) and not poly.gen.is_Symbol:
            # root of sin(x) + 1 is -1 but when someone passes an Expr
            # instead of Poly they may not expect that the generator will
            # be sin(x), not x
            raise PolynomialError("generator must be a Symbol")
    except GeneratorsNeeded:
        raise PolynomialError(
            "Cannot refine a root of %s, not a polynomial" % f)

    return poly.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
    """
    Return the number of roots of ``f`` in ``[inf, sup]`` interval.

    If one of ``inf`` or ``sup`` is complex, it will return the number of
    roots in the complex rectangle with corners at ``inf`` and ``sup``.

    Examples
    ========

    >>> from sympy import count_roots, I
    >>> from sympy.abc import x

    >>> count_roots(x**4 - 4, -3, 3)
    2
    >>> count_roots(x**4 - 4, 0, 1 + 3*I)
    1

    """
    try:
        poly = Poly(f, greedy=False)
        if not isinstance(f, Poly) and not poly.gen.is_Symbol:
            # root of sin(x) + 1 is -1 but when someone passes an Expr
            # instead of Poly they may not expect that the generator will
            # be sin(x), not x
            raise PolynomialError("generator must be a Symbol")
    except GeneratorsNeeded:
        raise PolynomialError("Cannot count roots of %s, not a polynomial" % f)

    return poly.count_roots(inf=inf, sup=sup)
@public
def all_roots(f, multiple=True, radicals=True, extension=False):
    """
    Returns the real and complex roots of ``f`` with multiplicities.

    Explanation
    ===========

    Finds all real and complex roots of a univariate polynomial with rational
    coefficients of any degree exactly. The roots are represented in the form
    given by :func:`~.rootof`, i.e. rational numbers, simple radical
    expressions or :class:`~.ComplexRootOf` instances. This is equivalent to
    using :func:`~.rootof` to find each of the indexed roots.

    Examples
    ========

    >>> from sympy import all_roots
    >>> from sympy.abc import x

    >>> print(all_roots(x**3 + 1))
    [-1, 1/2 - sqrt(3)*I/2, 1/2 + sqrt(3)*I/2]

    Simple radical formulae are used in some cases but the cubic and quartic
    formulae are avoided; most non-rational roots are represented as
    :class:`~.ComplexRootOf`:

    >>> print(all_roots(x**3 + x + 1))
    [CRootOf(x**3 + x + 1, 0), CRootOf(x**3 + x + 1, 1), CRootOf(x**3 + x + 1, 2)]

    Irrational algebraic coefficients are handled if ``extension=True``:

    >>> from sympy import sqrt, expand
    >>> p = expand((x - sqrt(2))*(x - sqrt(3)))
    >>> all_roots(p, extension=True)
    [sqrt(2), sqrt(3)]

    For transcendental or symbolic coefficients use :func:`~.ground_roots`,
    :func:`~.nroots` or :func:`sympy.polys.polyroots.roots` instead.

    Parameters
    ==========

    f : :class:`~.Expr` or :class:`~.Poly`
        A univariate polynomial with rational (or ``Float``) coefficients.
    multiple : ``bool`` (default ``True``).
        Whether to return a ``list`` of roots or a list of root/multiplicity
        pairs.
    radicals : ``bool`` (default ``True``)
        Use simple radical formulae rather than :py:class:`~.ComplexRootOf`
        for some irrational roots.
    extension : ``bool`` (default ``False``)
        Whether to construct an algebraic extension domain before computing
        the roots. Necessary for (irrational) algebraic coefficients but can
        be slow.

    Returns
    =======

    A list of :class:`~.Expr` (usually :class:`~.ComplexRootOf`) with each
    root repeated according to its multiplicity. The roots are uniquely
    ordered: real roots (increasing) first, then complex roots by increasing
    real part and then increasing imaginary part. If ``multiple=False`` a
    list of root/multiplicity pairs is returned instead.

    See also
    ========

    Poly.all_roots, rootof, real_roots, ground_roots, nroots,
    sympy.polys.polyroots.roots

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Abel%E2%80%93Ruffini_theorem

    """
    try:
        if isinstance(f, Poly):
            # Rebuild over an algebraic extension only when needed.
            if extension and not f.domain.is_AlgebraicField:
                F = Poly(f.expr, extension=True)
            else:
                F = f
        elif extension:
            F = Poly(f, extension=True)
        else:
            F = Poly(f, greedy=False)
        if not isinstance(f, Poly) and not F.gen.is_Symbol:
            # root of sin(x) + 1 is -1 but when someone
            # passes an Expr instead of Poly they may not expect
            # that the generator will be sin(x), not x
            raise PolynomialError("generator must be a Symbol")
    except GeneratorsNeeded:
        # BUGFIX: the message previously said "real roots" (copy-pasted from
        # real_roots) although this function computes all roots.
        raise PolynomialError(
            "Cannot compute roots of %s, not a polynomial" % f)
    return F.all_roots(multiple=multiple, radicals=radicals)
@public
def real_roots(f, multiple=True, radicals=True, extension=False):
    """
    Returns the real roots of ``f`` with multiplicities.

    Explanation
    ===========

    Finds all real roots of a univariate polynomial with rational
    coefficients of any degree exactly, represented in the form given by
    :func:`~.rootof`. This is equivalent to computing :func:`~.all_roots`
    and filtering out the real ones, but avoids the costly complex root
    isolation routines and is therefore more efficient when only real
    roots are needed.

    Examples
    ========

    >>> from sympy import real_roots
    >>> from sympy.abc import x

    >>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
    [-1/2, 2, 2]
    >>> real_roots(2*x**3 - 7*x**2 + 4*x + 4, multiple=False)
    [(-1/2, 1), (2, 2)]

    Rational roots are returned as rational numbers; roots of some simple
    factors use radical formulae (unless ``radicals=False``); all others
    are :class:`~.ComplexRootOf` instances:

    >>> p = (x + 7)*(x**2 - 2)*(x**3 + x + 1)
    >>> print(real_roots(p))
    [-7, -sqrt(2), CRootOf(x**3 + x + 1, 0), sqrt(2)]
    >>> print(real_roots(p, radicals=False))
    [-7, CRootOf(x**2 - 2, 0), CRootOf(x**3 + x + 1, 0), CRootOf(x**2 - 2, 1)]

    Irrational algebraic coefficients are handled if ``extension=True``:

    >>> from sympy import sqrt, expand
    >>> p = expand((x - sqrt(2))*(x - sqrt(3)))
    >>> real_roots(p, extension=True)
    [sqrt(2), sqrt(3)]

    For transcendental or symbolic coefficients use :func:`~.ground_roots`,
    :func:`~.nroots` or :func:`sympy.polys.polyroots.roots` instead.

    Parameters
    ==========

    f : :class:`~.Expr` or :class:`~.Poly`
        A univariate polynomial with rational (or ``Float``) coefficients.
    multiple : ``bool`` (default ``True``).
        Whether to return a ``list`` of roots or a list of root/multiplicity
        pairs.
    radicals : ``bool`` (default ``True``)
        Use simple radical formulae rather than :py:class:`~.ComplexRootOf`
        for some irrational roots.
    extension : ``bool`` (default ``False``)
        Whether to construct an algebraic extension domain before computing
        the roots. Necessary for (irrational) algebraic coefficients but can
        be slow.

    Returns
    =======

    A list of :class:`~.Expr` (usually :class:`~.ComplexRootOf`) with the
    real roots in increasing order, repeated according to multiplicity. If
    ``multiple=False`` a list of root/multiplicity pairs is returned
    instead.

    See also
    ========

    Poly.real_roots, rootof, all_roots, ground_roots, nroots,
    sympy.polys.polyroots.roots

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Casus_irreducibilis

    """
    try:
        if isinstance(f, Poly):
            # Rebuild over an algebraic extension only when needed.
            if extension and not f.domain.is_AlgebraicField:
                F = Poly(f.expr, extension=True)
            else:
                F = f
        elif extension:
            F = Poly(f, extension=True)
        else:
            F = Poly(f, greedy=False)
        if not isinstance(f, Poly) and not F.gen.is_Symbol:
            # root of sin(x) + 1 is -1 but when someone passes an Expr
            # instead of Poly they may not expect that the generator will
            # be sin(x), not x
            raise PolynomialError("generator must be a Symbol")
    except GeneratorsNeeded:
        raise PolynomialError(
            "Cannot compute real roots of %s, not a polynomial" % f)
    return F.real_roots(multiple=multiple, radicals=radicals)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
    """
    Compute numerical approximations of roots of ``f``.

    Examples
    ========

    >>> from sympy import nroots
    >>> from sympy.abc import x

    >>> nroots(x**2 - 3, n=15)
    [-1.73205080756888, 1.73205080756888]
    >>> nroots(x**2 - 3, n=30)
    [-1.73205080756887729352744634151, 1.73205080756887729352744634151]

    """
    try:
        poly = Poly(f, greedy=False)
        if not isinstance(f, Poly) and not poly.gen.is_Symbol:
            # root of sin(x) + 1 is -1 but when someone passes an Expr
            # instead of Poly they may not expect that the generator will
            # be sin(x), not x
            raise PolynomialError("generator must be a Symbol")
    except GeneratorsNeeded:
        raise PolynomialError(
            "Cannot compute numerical roots of %s, not a polynomial" % f)

    return poly.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
if not isinstance(f, Poly) and not F.gen.is_Symbol:
# root of sin(x) + 1 is -1 but when someone
# passes an Expr instead of Poly they may not expect
# that the generator will be sin(x), not x
raise PolynomialError("generator must be a Symbol")
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
if not isinstance(f, Poly) and not F.gen.is_Symbol:
# root of sin(x) + 1 is -1 but when someone
# passes an Expr instead of Poly they may not expect
# that the generator will be sin(x), not x
raise PolynomialError("generator must be a Symbol")
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, _signsimp=True, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol, together
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
Note: due to automatic distribution of Rationals, a sum divided by an integer
will appear as a sum. To recover a rational form use `together` on the result:
>>> cancel(x/2 + 1)
x/2 + 1
>>> together(_)
(x + 2)/2
"""
from sympy.simplify.simplify import signsimp
from sympy.polys.rings import sring
options.allowed_flags(args, ['polys'])
f = sympify(f)
if _signsimp:
f = signsimp(f)
opt = {}
if 'polys' in args:
opt['polys'] = args['polys']
if not isinstance(f, Tuple):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
if isinstance(p, Poly) and isinstance(q, Poly):
opt['gens'] = p.gens
opt['domain'] = p.domain
opt['polys'] = opt.get('polys', True)
p, q = p.as_expr(), q.as_expr()
else:
raise ValueError('unexpected argument: %s' % f)
from sympy.functions.elementary.piecewise import Piecewise
try:
if f.has(Piecewise):
raise PolynomialError()
R, (F, G) = sring((p, q), *gens, **args)
if not R.ngens:
if not isinstance(f, Tuple):
return f.expand()
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
c, nc = sift(f.args, lambda x:
x.is_commutative is True and not x.has(Piecewise),
binary=True)
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func(*c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
if isinstance(e, BooleanAtom) or not isinstance(e, Expr):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, (P, Q) = 1, F.cancel(G)
if opt.get('polys', False) and 'gens' not in opt:
opt['gens'] = R.symbols
if not isinstance(f, Tuple):
return c*(P.as_expr()/Q.as_expr())
else:
P, Q = P.as_expr(), Q.as_expr()
if not opt.get('polys', False):
return c, P, Q
else:
return c, Poly(P, *gens, **opt), Poly(Q, *gens, **opt)
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.is_Ring and not domain.is_Field:
opt = opt.clone({"domain": domain.get_field()})
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of :func:`~.solve_poly_system`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used. The
algorithm can be set using the ``method`` flag or with the
:func:`sympy.polys.polyconfig.setup` function.
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
@public
def hurwitz_conditions(f, *gens, **args):
"""
See :func:`~.Poly.hurwitz_conditions`.
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('hurwitz_conditions', 1, exc)
return F.hurwitz_conditions()
@public
def schur_conditions(f, *gens, **args):
"""
See :func:`~.Poly.schur_conditions`.
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('schur_conditions', 1, exc)
return F.schur_conditions()
@public
| PurePoly |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/translator.py | {
"start": 7463,
"end": 13311
} | class ____:
"""Translator class which converts raw response data from the Tableau API into AssetSpecs.
Subclass this class to implement custom logic for each type of Tableau content.
"""
def get_asset_spec(self, data: TableauTranslatorData) -> AssetSpec:
if data.content_type == TableauContentType.SHEET:
return self.get_sheet_spec(data)
elif data.content_type == TableauContentType.DASHBOARD:
return self.get_dashboard_spec(data)
elif data.content_type == TableauContentType.DATA_SOURCE:
return self.get_data_source_spec(data)
else:
# switch back to check.assert_never when TableauContentType.WORKBOOK is handled
check.failed(f"unhandled type {data.content_type}")
def get_sheet_spec(self, data: TableauTranslatorData) -> AssetSpec:
data_source_ids = get_data_source_ids_from_sheet_props(props=data.properties)
data_source_keys = [
self.get_asset_spec(
TableauTranslatorData(
content_data=data.workspace_data.data_sources_by_id[data_source_id],
workspace_data=data.workspace_data,
)
).key
for data_source_id in data_source_ids
]
workbook_id = data.properties["workbook"]["luid"]
workbook_data = data.workspace_data.workbooks_by_id[workbook_id]
asset_key = AssetKey(
[
_coerce_input_to_valid_name(workbook_data.properties["name"]),
"sheet",
_coerce_input_to_valid_name(data.properties["name"]),
]
)
return AssetSpec(
key=asset_key,
deps=data_source_keys if data_source_keys else None,
tags={"dagster/storage_kind": "tableau", **TableauTagSet(asset_type="sheet")},
metadata={
**TableauViewMetadataSet(
id=data.properties["luid"],
workbook_id=data.properties["workbook"]["luid"],
project_name=workbook_data.properties["projectName"],
project_id=workbook_data.properties["projectLuid"],
)
},
kinds={"tableau", "sheet"},
)
def get_dashboard_spec(self, data: TableauTranslatorData) -> AssetSpec:
dashboard_upstream_sheets = data.properties.get("sheets", [])
sheet_ids = {sheet["luid"] for sheet in dashboard_upstream_sheets if sheet["luid"]}
sheet_keys = [
self.get_asset_spec(
TableauTranslatorData(
content_data=data.workspace_data.sheets_by_id[sheet_id],
workspace_data=data.workspace_data,
)
).key
for sheet_id in sheet_ids
]
dashboard_upstream_data_source_ids = data.properties.get("data_source_ids", [])
data_source_keys = [
self.get_asset_spec(
TableauTranslatorData(
content_data=data.workspace_data.data_sources_by_id[data_source_id],
workspace_data=data.workspace_data,
)
).key
for data_source_id in dashboard_upstream_data_source_ids
]
upstream_keys = sheet_keys + data_source_keys
workbook_id = data.properties["workbook"]["luid"]
workbook_data = data.workspace_data.workbooks_by_id[workbook_id]
asset_key = AssetKey(
[
_coerce_input_to_valid_name(workbook_data.properties["name"]),
"dashboard",
_coerce_input_to_valid_name(data.properties["name"]),
]
)
return AssetSpec(
key=asset_key,
deps=upstream_keys if upstream_keys else None,
tags={"dagster/storage_kind": "tableau", **TableauTagSet(asset_type="dashboard")},
metadata={
**TableauViewMetadataSet(
id=data.properties["luid"],
workbook_id=data.properties["workbook"]["luid"],
project_name=workbook_data.properties["projectName"],
project_id=workbook_data.properties["projectLuid"],
)
},
kinds={"tableau", "dashboard"},
)
def get_data_source_spec(self, data: TableauTranslatorData) -> AssetSpec:
kinds = {
"tableau",
*["extract" if data.properties["hasExtracts"] else "live"],
*["published datasource" if data.properties["isPublished"] else "embedded datasource"],
}
if data.properties["isPublished"]:
asset_key = AssetKey([_coerce_input_to_valid_name(data.properties["name"])])
else:
workbook_id = data.properties["workbook"]["luid"]
workbook_data = data.workspace_data.workbooks_by_id[workbook_id]
asset_key = AssetKey(
[
_coerce_input_to_valid_name(workbook_data.properties["name"]),
"embedded_datasource",
_coerce_input_to_valid_name(data.properties["name"]),
]
)
return AssetSpec(
key=asset_key,
tags={"dagster/storage_kind": "tableau", **TableauTagSet(asset_type="data_source")},
metadata={
**TableauDataSourceMetadataSet(
id=data.properties["luid"],
has_extracts=data.properties["hasExtracts"],
is_published=data.properties["isPublished"],
workbook_id=data.properties["workbook"]["luid"]
if not data.properties["isPublished"]
else None,
)
},
kinds=kinds,
)
| DagsterTableauTranslator |
python | django-extensions__django-extensions | django_extensions/management/commands/runscript.py | {
"start": 622,
"end": 935
} | class ____(Exception):
def __init__(self, value):
self.message = (
value + " If --dir-policy is custom than you must set correct directory in "
"--dir option or in settings.RUNSCRIPT_CHDIR"
)
def __str__(self):
return self.message
| BadCustomDirectoryException |
python | python-openxml__python-docx | src/docx/types.py | {
"start": 510,
"end": 848
} | class ____(Protocol):
"""An object that provides access to its XmlPart.
This type is for objects that need access to their part but it either isn't a
StoryPart or they don't care, possibly because they just need access to the package
or related parts.
"""
@property
def part(self) -> XmlPart: ...
| ProvidesXmlPart |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 49777,
"end": 50144
} | class ____(CoupledSpinState, Bra):
"""Coupled eigenbra of Jx.
See JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JxKetCoupled
@classmethod
def uncoupled_class(self):
return JxBra
| JxBraCoupled |
python | google__jax | jax/_src/export/shape_poly.py | {
"start": 2591,
"end": 3045
} | class ____:
# Either e1 == e2 if cmp == Comparator.EQ else e1 >= e2
cmp: Comparator
debug_str: str # The form in which the user expressed it, for error messages
# e1, e2, and diff == e1 - e2, are normalized w.r.t. previous constraints only
e1: DimSize
e2: DimSize
# we pre-compute diff to avoid having the normalization rule kick in later.
diff: DimSize
def __repr__(self):
return f"Constraint({self.debug_str})"
| _SymbolicConstraint |
python | django__django | django/forms/models.py | {
"start": 9219,
"end": 12106
} | class ____(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
new_class = super().__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, "Meta", None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ["fields", "exclude", "localized_fields"]:
value = getattr(opts, opt)
if isinstance(value, str) and value != ALL_FIELDS:
msg = (
"%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?"
% {
"model": new_class.__name__,
"opt": opt,
"value": value,
}
)
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(
opts.model,
opts.fields,
opts.exclude,
opts.widgets,
opts.formfield_callback,
opts.localized_fields,
opts.labels,
opts.help_texts,
opts.error_messages,
opts.field_classes,
# limit_choices_to will be applied during ModelForm.__init__().
apply_limit_choices_to=False,
form_declared_fields=new_class.declared_fields,
)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = {k for k, v in fields.items() if not v}
missing_fields = none_model_fields.difference(new_class.declared_fields)
if missing_fields:
message = "Unknown field(s) (%s) specified for %s"
message %= (", ".join(missing_fields), opts.model.__name__)
raise FieldError(message)
# Include all the other declared fields.
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
| ModelFormMetaclass |
python | coleifer__peewee | tests/fields.py | {
"start": 10431,
"end": 13302
} | class ____(ModelTestCase):
requires = [User, Tweet]
def test_set_fk(self):
huey = User.create(username='huey')
zaizee = User.create(username='zaizee')
# Test resolution of attributes after creation does not trigger SELECT.
with self.assertQueryCount(1):
tweet = Tweet.create(content='meow', user=huey)
self.assertEqual(tweet.user.username, 'huey')
# Test we can set to an integer, in which case a query will occur.
with self.assertQueryCount(2):
tweet = Tweet.create(content='purr', user=zaizee.id)
self.assertEqual(tweet.user.username, 'zaizee')
# Test we can set the ID accessor directly.
with self.assertQueryCount(2):
tweet = Tweet.create(content='hiss', user_id=huey.id)
self.assertEqual(tweet.user.username, 'huey')
def test_follow_attributes(self):
huey = User.create(username='huey')
Tweet.create(content='meow', user=huey)
Tweet.create(content='hiss', user=huey)
with self.assertQueryCount(1):
query = (Tweet
.select(Tweet.content, Tweet.user.username)
.join(User)
.order_by(Tweet.content))
self.assertEqual([(tweet.content, tweet.user.username)
for tweet in query],
[('hiss', 'huey'), ('meow', 'huey')])
self.assertRaises(AttributeError, lambda: Tweet.user.foo)
def test_disable_backref(self):
class Person(TestModel):
pass
class Pet(TestModel):
owner = ForeignKeyField(Person, backref='!')
self.assertEqual(Pet.owner.backref, '!')
# No attribute/accessor is added to the related model.
self.assertRaises(AttributeError, lambda: Person.pet_set)
# We still preserve the metadata about the relationship.
self.assertTrue(Pet.owner in Person._meta.backrefs)
@requires_models(U2, T2)
def test_on_delete_behavior(self):
if IS_SQLITE:
self.database.foreign_keys = 1
with self.database.atomic():
for username in ('u1', 'u2', 'u3'):
user = U2.create(username=username)
for i in range(3):
T2.create(user=user, content='%s-%s' % (username, i))
self.assertEqual(T2.select().count(), 9)
U2.delete().where(U2.username == 'u2').execute()
self.assertEqual(T2.select().count(), 6)
query = (U2
.select(U2.username, fn.COUNT(T2.id).alias('ct'))
.join(T2, JOIN.LEFT_OUTER)
.group_by(U2.username)
.order_by(U2.username))
self.assertEqual([(u.username, u.ct) for u in query], [
('u1', 3),
('u3', 3)])
| TestForeignKeyField |
python | PyCQA__flake8 | tests/integration/test_checker.py | {
"start": 489,
"end": 10971
} | class ____:
"""Simple file plugin class yielding the expected report."""
def __init__(self, tree):
"""Construct a dummy object to provide mandatory parameter."""
pass
def run(self):
"""Run class yielding one element containing the expected report."""
yield EXPECTED_REPORT + (type(self),)
def plugin_func_gen(tree):
"""Yield the expected report."""
yield EXPECTED_REPORT + (type(plugin_func_gen),)
def plugin_func_list(tree):
"""Return a list of expected reports."""
return [EXPECTED_REPORT + (type(plugin_func_list),)]
def plugin_func_physical_ret(physical_line):
"""Expect report from a physical_line. Single return."""
return EXPECTED_REPORT_PHYSICAL_LINE
def plugin_func_physical_none(physical_line):
"""Expect report from a physical_line. No results."""
return None
def plugin_func_physical_list_single(physical_line):
"""Expect report from a physical_line. List of single result."""
return [EXPECTED_REPORT_PHYSICAL_LINE]
def plugin_func_physical_list_multiple(physical_line):
"""Expect report from a physical_line. List of multiple results."""
return [EXPECTED_REPORT_PHYSICAL_LINE] * 2
def plugin_func_physical_gen_single(physical_line):
"""Expect report from a physical_line. Generator of single result."""
yield EXPECTED_REPORT_PHYSICAL_LINE
def plugin_func_physical_gen_multiple(physical_line):
"""Expect report from a physical_line. Generator of multiple results."""
for _ in range(3):
yield EXPECTED_REPORT_PHYSICAL_LINE
def plugin_func_out_of_bounds(logical_line):
"""This produces an error out of bounds."""
yield 10000, "L100 test"
def mock_file_checker_with_plugin(plugin_target):
"""Get a mock FileChecker class with plugin_target registered.
Useful as a starting point for mocking reports/results.
"""
to_load = [
finder.Plugin(
"flake-package",
"9001",
importlib.metadata.EntryPoint(
"Q",
f"{plugin_target.__module__}:{plugin_target.__name__}",
"flake8.extension",
),
),
]
opts = finder.PluginOptions.blank()
plugins = finder.load_plugins(to_load, opts)
# Prevent it from reading lines from stdin or somewhere else
with mock.patch(
"flake8.processor.FileProcessor.read_lines", return_value=["Line 1"],
):
file_checker = checker.FileChecker(
filename="-",
plugins=plugins.checkers,
options=mock.MagicMock(),
)
return file_checker
@pytest.mark.parametrize(
"plugin_target",
[
PluginClass,
plugin_func_gen,
plugin_func_list,
],
)
def test_handle_file_plugins(plugin_target):
"""Test the FileChecker class handling different file plugin types."""
file_checker = mock_file_checker_with_plugin(plugin_target)
# Do not actually build an AST
file_checker.processor.build_ast = lambda: True
# Forward reports to this mock
report = mock.Mock()
file_checker.report = report
file_checker.run_ast_checks()
report.assert_called_once_with(
error_code=None,
line_number=EXPECTED_REPORT[0],
column=EXPECTED_REPORT[1],
text=EXPECTED_REPORT[2],
)
@pytest.mark.parametrize(
"plugin_target,len_results",
[
(plugin_func_physical_ret, 1),
(plugin_func_physical_none, 0),
(plugin_func_physical_list_single, 1),
(plugin_func_physical_list_multiple, 2),
(plugin_func_physical_gen_single, 1),
(plugin_func_physical_gen_multiple, 3),
],
)
def test_line_check_results(plugin_target, len_results):
"""Test the FileChecker class handling results from line checks."""
file_checker = mock_file_checker_with_plugin(plugin_target)
# Results will be stored in an internal array
file_checker.run_physical_checks(PHYSICAL_LINE)
expected = [EXPECTED_RESULT_PHYSICAL_LINE] * len_results
assert file_checker.results == expected
def test_logical_line_offset_out_of_bounds():
"""Ensure that logical line offsets that are out of bounds do not crash."""
file_checker = mock_file_checker_with_plugin(plugin_func_out_of_bounds)
logical_ret = (
"",
'print("xxxxxxxxxxx")',
[(0, (1, 0)), (5, (1, 5)), (6, (1, 6)), (19, (1, 19)), (20, (1, 20))],
)
with mock.patch.object(
FileProcessor,
"build_logical_line",
return_value=logical_ret,
):
file_checker.run_logical_checks()
assert file_checker.results == [("L100", 0, 0, "test", None)]
PLACEHOLDER_CODE = 'some_line = "of" * code'
@pytest.mark.parametrize(
"results, expected_order",
[
# No entries should be added
([], []),
# Results are correctly ordered
(
[
("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
],
[0, 1],
),
# Reversed order of lines
(
[
("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
],
[1, 0],
),
# Columns are not ordered correctly
# (when reports are ordered correctly)
(
[
("A101", 1, 2, "placeholder error", PLACEHOLDER_CODE),
("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
],
[1, 0, 2],
),
(
[
("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 1, 2, "placeholder error", PLACEHOLDER_CODE),
],
[1, 2, 0],
),
(
[
("A101", 1, 2, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 2, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
],
[0, 2, 1],
),
(
[
("A101", 1, 3, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 2, "placeholder error", PLACEHOLDER_CODE),
("A101", 3, 1, "placeholder error", PLACEHOLDER_CODE),
],
[0, 1, 2],
),
(
[
("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 1, 3, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 2, "placeholder error", PLACEHOLDER_CODE),
],
[0, 1, 2],
),
# Previously sort column and message (so reversed) (see bug 196)
(
[
("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
("A101", 2, 1, "charlie error", PLACEHOLDER_CODE),
],
[0, 1],
),
],
)
def test_report_order(results, expected_order):
"""
Test in which order the results will be reported.
It gets a list of reports from the file checkers and verifies that the
result will be ordered independent from the original report.
"""
def count_side_effect(name, sorted_results):
"""Side effect for the result handler to tell all are reported."""
return len(sorted_results)
# To simplify the parameters (and prevent copy & pasting) reuse report
# tuples to create the expected result lists from the indexes
expected_results = [results[index] for index in expected_order]
style_guide = mock.MagicMock(spec=["options", "processing_file"])
# Create a placeholder manager without arguments or plugins
# Just add one custom file checker which just provides the results
manager = checker.Manager(style_guide, finder.Checkers([], [], []), [])
manager.results = [("placeholder", results, {})]
# _handle_results is the first place which gets the sorted result
# Should something non-private be mocked instead?
handler = mock.Mock(side_effect=count_side_effect)
with mock.patch.object(manager, "_handle_results", handler):
assert manager.report() == (len(results), len(results))
handler.assert_called_once_with("placeholder", expected_results)
def test_acquire_when_multiprocessing_pool_can_initialize():
"""Verify successful importing of hardware semaphore support.
Mock the behaviour of a platform that has a hardware sem_open
implementation, and then attempt to initialize a multiprocessing
Pool object.
This simulates the behaviour on most common platforms.
"""
with mock.patch("multiprocessing.Pool") as pool:
result = checker._try_initialize_processpool(2, [])
pool.assert_called_once_with(2, checker._mp_init, initargs=([],))
assert result is pool.return_value
def test_acquire_when_multiprocessing_pool_can_not_initialize():
"""Verify unsuccessful importing of hardware semaphore support.
Mock the behaviour of a platform that has not got a hardware sem_open
implementation, and then attempt to initialize a multiprocessing
Pool object.
This scenario will occur on platforms such as Termux and on some
more exotic devices.
https://github.com/python/cpython/blob/4e02981de0952f54bf87967f8e10d169d6946b40/Lib/multiprocessing/synchronize.py#L30-L33
"""
with mock.patch("multiprocessing.Pool", side_effect=ImportError) as pool:
result = checker._try_initialize_processpool(2, [])
pool.assert_called_once_with(2, checker._mp_init, initargs=([],))
assert result is None
def test_handling_syntaxerrors_across_pythons():
"""Verify we properly handle exception argument tuples.
Python 3.10 added more information to the SyntaxError parse token tuple.
We need to handle that correctly to avoid crashing.
https://github.com/PyCQA/flake8/issues/1372
"""
err = SyntaxError(
"invalid syntax", ("<unknown>", 2, 1, "bad python:\n", 2, 11),
)
expected = (2, 1)
file_checker = checker.FileChecker(
filename="-",
plugins=finder.Checkers([], [], []),
options=mock.MagicMock(),
)
actual = file_checker._extract_syntax_information(err)
assert actual == expected
| PluginClass |
python | scipy__scipy | scipy/interpolate/tests/test_interpolate.py | {
"start": 90479,
"end": 102263
} | class ____:
def test_simple_1d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5)
x = np.linspace(0, 1, 5+1)
xi = rng.rand(200)
p = NdPPoly(c, (x,))
v1 = p((xi,))
v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
xp_assert_close(v1, v2)
def test_simple_2d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5, 6, 7)
x = np.linspace(0, 1, 6+1)
y = np.linspace(0, 1, 7+1)**2
xi = rng.rand(200)
yi = rng.rand(200)
v1 = np.empty([len(xi), 1], dtype=c.dtype)
v1.fill(np.nan)
_ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
(x, y),
np.array([4, 5], dtype=np.intc),
np.c_[xi, yi],
np.array([0, 0], dtype=np.intc),
1,
v1)
v1 = v1.ravel()
v2 = _ppoly2d_eval(c, (x, y), xi, yi)
xp_assert_close(v1, v2)
p = NdPPoly(c, (x, y))
for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
v1 = p(np.c_[xi, yi], nu=nu)
v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
xp_assert_close(v1, v2, err_msg=repr(nu))
def test_simple_3d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
xi = rng.rand(40)
yi = rng.rand(40)
zi = rng.rand(40)
p = NdPPoly(c, (x, y, z))
for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
(6, 0, 2)):
v1 = p((xi, yi, zi), nu=nu)
v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
xp_assert_close(v1, v2, err_msg=repr(nu))
def test_simple_4d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5, 6, 7, 8, 9, 10, 11)
x = np.linspace(0, 1, 8+1)
y = np.linspace(0, 1, 9+1)**2
z = np.linspace(0, 1, 10+1)**3
u = np.linspace(0, 1, 11+1)**4
xi = rng.rand(20)
yi = rng.rand(20)
zi = rng.rand(20)
ui = rng.rand(20)
p = NdPPoly(c, (x, y, z, u))
v1 = p((xi, yi, zi, ui))
v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
xp_assert_close(v1, v2)
def test_deriv_1d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5)
x = np.linspace(0, 1, 5+1)
p = NdPPoly(c, (x,))
# derivative
dp = p.derivative(nu=[1])
p1 = PPoly(c, x)
dp1 = p1.derivative()
xp_assert_close(dp.c, dp1.c)
# antiderivative
dp = p.antiderivative(nu=[2])
p1 = PPoly(c, x)
dp1 = p1.antiderivative(2)
xp_assert_close(dp.c, dp1.c)
def test_deriv_3d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
p = NdPPoly(c, (x, y, z))
# differentiate vs x
p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
dp = p.derivative(nu=[2])
dp1 = p1.derivative(2)
xp_assert_close(dp.c,
dp1.c.transpose(0, 2, 3, 1, 4, 5))
# antidifferentiate vs y
p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
dp = p.antiderivative(nu=[0, 1, 0])
dp1 = p1.antiderivative(1)
xp_assert_close(dp.c,
dp1.c.transpose(2, 0, 3, 4, 1, 5))
# differentiate vs z
p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
dp = p.derivative(nu=[0, 0, 3])
dp1 = p1.derivative(3)
xp_assert_close(dp.c,
dp1.c.transpose(2, 3, 0, 4, 5, 1))
def test_deriv_3d_simple(self):
# Integrate to obtain function x y**2 z**4 / (2! 4!)
rng = np.random.RandomState(1234)
c = np.ones((1, 1, 1, 3, 4, 5))
x = np.linspace(0, 1, 3+1)**1
y = np.linspace(0, 1, 4+1)**2
z = np.linspace(0, 1, 5+1)**3
p = NdPPoly(c, (x, y, z))
ip = p.antiderivative((1, 0, 4))
ip = ip.antiderivative((0, 2, 0))
xi = rng.rand(20)
yi = rng.rand(20)
zi = rng.rand(20)
xp_assert_close(ip((xi, yi, zi)),
xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
def test_integrate_2d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5, 16, 17)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
# make continuously differentiable so that nquad() has an
# easier time
c = c.transpose(0, 2, 1, 3)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, x, 2)
c = cx.reshape(c.shape)
c = c.transpose(0, 2, 1, 3)
c = c.transpose(1, 3, 0, 2)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, y, 2)
c = cx.reshape(c.shape)
c = c.transpose(2, 0, 3, 1).copy()
# Check integration
p = NdPPoly(c, (x, y))
for ranges in [[(0, 1), (0, 1)],
[(0, 0.5), (0, 1)],
[(0, 1), (0, 0.5)],
[(0.3, 0.7), (0.6, 0.2)]]:
ig = p.integrate(ranges)
ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
xp_assert_close(ig, ig2, rtol=1e-5, atol=1e-5, check_0d=False,
err_msg=repr(ranges))
def test_integrate_1d(self):
rng = np.random.RandomState(1234)
c = rng.rand(4, 5, 6, 16, 17, 18)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
z = np.linspace(0, 1, 18+1)**3
# Check 1-D integration
p = NdPPoly(c, (x, y, z))
u = rng.rand(200)
v = rng.rand(200)
a, b = 0.2, 0.7
px = p.integrate_1d(a, b, axis=0)
pax = p.antiderivative((1, 0, 0))
xp_assert_close(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
py = p.integrate_1d(a, b, axis=1)
pay = p.antiderivative((0, 1, 0))
xp_assert_close(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
pz = p.integrate_1d(a, b, axis=2)
paz = p.antiderivative((0, 0, 1))
xp_assert_close(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
def test_concurrency(self):
rng = np.random.default_rng(12345)
c = rng.uniform(size=(4, 5, 6, 7, 8, 9))
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
p = NdPPoly(c, (x, y, z))
def worker_fn(_, spl):
xi = rng.uniform(size=40)
yi = rng.uniform(size=40)
zi = rng.uniform(size=40)
spl((xi, yi, zi))
_run_concurrent_barrier(10, worker_fn, p)
def _ppoly_eval_1(c, x, xps):
"""Evaluate piecewise polynomial manually"""
out = np.zeros((len(xps), c.shape[2]))
for i, xp in enumerate(xps):
if xp < 0 or xp > 1:
out[i,:] = np.nan
continue
j = np.searchsorted(x, xp) - 1
d = xp - x[j]
assert x[j] <= xp < x[j+1]
r = sum(c[k,j] * d**(c.shape[0]-k-1)
for k in range(c.shape[0]))
out[i,:] = r
return out
def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
"""Evaluate piecewise polynomial manually (another way)"""
a = breaks[0]
b = breaks[-1]
K = coeffs.shape[0]
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= a) & (xnew <= b)
res[~mask] = fill
xx = xnew.compress(mask)
indxs = np.searchsorted(breaks, xx)-1
indxs = indxs.clip(0, len(breaks))
pp = coeffs
diff = xx - breaks.take(indxs)
V = np.vander(diff, N=K)
values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
res[mask] = values
res = res.reshape(saveshape)
return res
def _dpow(x, y, n):
"""
d^n (x**y) / dx^n
"""
if n < 0:
raise ValueError("invalid derivative order")
elif n > y:
return 0
else:
return poch(y - n + 1, n) * x**(y - n)
def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
"""
Straightforward evaluation of 2-D piecewise polynomial
"""
if nu is None:
nu = (0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny = c.shape[:2]
for jout, (x, y) in enumerate(zip(xnew, ynew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
val += (c[nx-k1-1,ny-k2-1,j1,j2]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1]))
out[jout] = val
return out
def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
"""
Straightforward evaluation of 3-D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny, nz = c.shape[:3]
for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2]))
out[jout] = val
return out
def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
"""
Straightforward evaluation of 4-D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
mx, my, mz, mu = c.shape[:4]
for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1]) and
(xs[3][0] <= u <= xs[3][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
j4 = np.searchsorted(xs[3], u) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
s4 = u - xs[3][j4]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
for k4 in range(c.shape[3]):
val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2])
* _dpow(s4, k4, nu[3]))
out[jout] = val
return out
| TestNdPPoly |
python | keras-team__keras | keras/src/layers/attention/additive_attention.py | {
"start": 181,
"end": 4309
} | class ____(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. A optional `key` tensor of shape `(batch_size, Tv, dim)`. If none
supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, it `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
| AdditiveAttention |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 1163,
"end": 1236
} | class ____(Token):
id = '<block sequence start>'
| BlockSequenceStartToken |
python | RaRe-Technologies__gensim | gensim/corpora/bleicorpus.py | {
"start": 426,
"end": 5768
} | class ____(IndexedCorpus):
"""Corpus in Blei's LDA-C format.
The corpus is represented as two files: one describing the documents, and another
describing the mapping between words and their ids.
Each document is one line::
N fieldId1:fieldValue1 fieldId2:fieldValue2 ... fieldIdN:fieldValueN
The vocabulary is a file with words, one word per line; word at line K has an implicit `id=K`.
"""
def __init__(self, fname, fname_vocab=None):
"""
Parameters
----------
fname : str
Path to corpus.
fname_vocab : str, optional
Vocabulary file. If `fname_vocab` is None, searching one of variants:
* `fname`.vocab
* `fname`/vocab.txt
* `fname_without_ext`.vocab
* `fname_folder`/vocab.txt
Raises
------
IOError
If vocabulary file doesn't exist.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
if fname_vocab is None:
fname_base, _ = path.splitext(fname)
fname_dir = path.dirname(fname)
for fname_vocab in [
utils.smart_extension(fname, '.vocab'),
utils.smart_extension(fname, '/vocab.txt'),
utils.smart_extension(fname_base, '.vocab'),
utils.smart_extension(fname_dir, '/vocab.txt'),
]:
if path.exists(fname_vocab):
break
else:
raise IOError('BleiCorpus: could not find vocabulary file')
self.fname = fname
with utils.open(fname_vocab, 'rb') as fin:
words = [utils.to_unicode(word).rstrip() for word in fin]
self.id2word = dict(enumerate(words))
def __iter__(self):
"""Iterate over the corpus, returning one sparse (BoW) vector at a time.
Yields
------
list of (int, float)
Document's BoW representation.
"""
lineno = -1
with utils.open(self.fname, 'rb') as fin:
for lineno, line in enumerate(fin):
yield self.line2doc(line)
self.length = lineno + 1
def line2doc(self, line):
"""Convert line in Blei LDA-C format to document (BoW representation).
Parameters
----------
line : str
Line in Blei's LDA-C format.
Returns
-------
list of (int, float)
Document's BoW representation.
"""
parts = utils.to_unicode(line).split()
if int(parts[0]) != len(parts) - 1:
raise ValueError("invalid format in %s: %s" % (self.fname, repr(line)))
doc = [part.rsplit(':', 1) for part in parts[1:]]
doc = [(int(p1), float(p2)) for p1, p2 in doc]
return doc
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""Save a corpus in the LDA-C format.
Notes
-----
There are actually two files saved: `fname` and `fname.vocab`, where `fname.vocab` is the vocabulary file.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Input corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word for `corpus`.
metadata : bool, optional
THIS PARAMETER WILL BE IGNORED.
Returns
-------
list of int
Offsets for each line in file (in bytes).
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
elif id2word:
num_terms = 1 + max(id2word)
else:
num_terms = 0
logger.info("storing corpus in Blei's LDA-C format into %s", fname)
with utils.open(fname, 'wb') as fout:
offsets = []
for doc in corpus:
doc = list(doc)
offsets.append(fout.tell())
parts = ["%i:%g" % p for p in doc if abs(p[1]) > 1e-7]
fout.write(utils.to_utf8("%i %s\n" % (len(doc), ' '.join(parts))))
# write out vocabulary, in a format compatible with Blei's topics.py script
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s", num_terms, fname_vocab)
with utils.open(fname_vocab, 'wb') as fout:
for featureid in range(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
return offsets
def docbyoffset(self, offset):
"""Get document corresponding to `offset`.
Offset can be given from :meth:`~gensim.corpora.bleicorpus.BleiCorpus.save_corpus`.
Parameters
----------
offset : int
Position of the document in the file (in bytes).
Returns
-------
list of (int, float)
Document in BoW format.
"""
with utils.open(self.fname, 'rb') as f:
f.seek(offset)
return self.line2doc(f.readline())
| BleiCorpus |
python | ipython__ipython | tests/test_tools.py | {
"start": 2135,
"end": 2549
} | class ____(unittest.TestCase):
def test_passing(self):
with tt.AssertPrints("abc"):
print("abcd")
print("def")
print(b"ghi")
def test_failing(self):
def func():
with tt.AssertPrints("abc"):
print("acd")
print("def")
print(b"ghi")
self.assertRaises(AssertionError, func)
| TestAssertPrints |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/app_testing/tutorial001_py310/main.py | {
"start": 130,
"end": 267
} | class ____(SQLModel):
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
| HeroBase |
python | huggingface__transformers | src/transformers/models/olmo3/convert_olmo3_weights_to_hf.py | {
"start": 3317,
"end": 3452
} | class ____:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
@dataclass
| _StorageInfo |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 1611,
"end": 1864
} | class ____(Message):
message = "%r may be undefined, or defined from star imports: %s"
def __init__(self, filename, loc, name, from_list):
Message.__init__(self, filename, loc)
self.message_args = (name, from_list)
| ImportStarUsage |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_context.py | {
"start": 764,
"end": 1580
} | class ____(threading.local):
"""A context for saving checkpoint upon preemption."""
def __init__(self):
super().__init__()
self._in_preemption_save_context = False
def enter_preemption_save_context(self):
self._in_preemption_save_context = True
def exit_preemption_save_context(self):
self._in_preemption_save_context = False
def in_preemption_save_context(self):
return self._in_preemption_save_context
_preemption_save_context = PreemptionSaveContext()
@contextlib.contextmanager
def preemption_save_context():
_preemption_save_context.enter_preemption_save_context()
try:
yield
finally:
_preemption_save_context.exit_preemption_save_context()
def in_preemption_save_context():
return _preemption_save_context.in_preemption_save_context()
| PreemptionSaveContext |
python | pytorch__pytorch | test/distributed/_composable/test_replicate_with_compiler.py | {
"start": 1399,
"end": 2271
} | class ____(nn.Module):
def __init__(self, checkpoint=False):
super().__init__()
self.fc1 = nn.Linear(DIM, DIM)
self.fc2 = nn.Linear(DIM, DIM)
self.fc3 = nn.Linear(DIM, DIM)
self.fc4 = nn.Linear(DIM, DIM)
self.use_checkpoint = checkpoint
def forward(self, x):
if self.use_checkpoint:
_fc1 = checkpoint(self.fc1, x, use_reentrant=False)
else:
_fc1 = self.fc1(x)
return self.fc4(self.fc3(self.fc2(_fc1)))
def compiler_fn(no_inductor=False):
def _compiler_fn(gm):
def inner_compiler(gm_, example_inputs_):
if no_inductor:
return gm_
else:
return inductor.compile(gm_, example_inputs_)
gm = torch.compile(gm, fullgraph=True, backend=inner_compiler)
return gm
return _compiler_fn
| Net |
python | getsentry__sentry | tests/sentry/models/test_teamkeytransaction.py | {
"start": 366,
"end": 2151
} | class ____(TransactionTestCase):
def test_custom_manger(self) -> None:
self.assertIsInstance(TeamKeyTransaction.objects, TeamKeyTransactionModelManager)
@receivers_raise_on_send()
def test_post_save_signal_runs_if_dynamic_sampling_is_disabled(self) -> None:
self.project = self.create_project(name="foo")
team = self.create_team(organization=self.organization, name="Team A")
self.project.add_team(team)
with patch("sentry.discover.models.schedule_invalidate_project_config") as mock_task:
TeamKeyTransaction.objects.create(
organization=self.organization,
transaction="/foo",
project_team=ProjectTeam.objects.get(project=self.project, team=team),
)
assert mock_task.mock_calls == []
@receivers_raise_on_send()
def test_post_save_signal_runs_if_dynamic_sampling_is_enabled(self) -> None:
with Feature(
{
"organizations:dynamic-sampling": True,
}
):
self.project = self.create_project(name="foo")
team = self.create_team(organization=self.organization, name="Team A")
self.project.add_team(team)
with patch("sentry.discover.models.schedule_invalidate_project_config") as mock_task:
TeamKeyTransaction.objects.create(
organization=self.organization,
transaction="/foo",
project_team=ProjectTeam.objects.get(project=self.project, team=team),
)
assert mock_task.mock_calls == [
mock_call(project_id=self.project.id, trigger="teamkeytransaction.post_save")
]
| TeamKeyTransactionModelManagerTestCase |
python | Textualize__textual | docs/examples/tutorial/stopwatch04.py | {
"start": 240,
"end": 883
} | class ____(HorizontalGroup):
"""A stopwatch widget."""
def on_button_pressed(self, event: Button.Pressed) -> None:
"""Event handler called when a button is pressed."""
if event.button.id == "start":
self.add_class("started")
elif event.button.id == "stop":
self.remove_class("started")
def compose(self) -> ComposeResult:
"""Create child widgets of a stopwatch."""
yield Button("Start", id="start", variant="success")
yield Button("Stop", id="stop", variant="error")
yield Button("Reset", id="reset")
yield TimeDisplay("00:00:00.00")
| Stopwatch |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py | {
"start": 79307,
"end": 84227
} | class ____(GoogleCloudBaseOperator):
"""
Updates an existing entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogUpdateEntryOperator`
:param entry: Required. The updated entry. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Entry`
:param update_mask: The fields to update on the entry. If absent or empty, all modifiable fields are
updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param location: Required. The location of the entry to update.
:param entry_group: The entry group ID for the entry that is being updated.
:param entry_id: The entry ID that is being updated.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"entry",
"update_mask",
"location",
"entry_group",
"entry_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
entry: dict | Entry,
update_mask: dict | FieldMask,
location: str | None = None,
entry_group: str | None = None,
entry_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.entry = entry
self.update_mask = update_mask
self.location = location
self.entry_group = entry_group
self.entry_id = entry_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.update_entry(
entry=self.entry,
update_mask=self.update_mask,
location=self.location,
entry_group=self.entry_group,
entry_id=self.entry_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
location_id, entry_group_id, entry_id = result.name.split("/")[3::2]
DataCatalogEntryLink.persist(
context=context,
entry_id=self.entry_id or entry_id,
entry_group_id=self.entry_group or entry_group_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogUpdateEntryOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| CloudDataCatalogUpdateEntryOperator |
python | pypa__setuptools | setuptools/_distutils/tests/test_check.py | {
"start": 338,
"end": 6226
} | class ____(support.TempdirManager):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
assert cmd._warnings == 1
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {
'url': 'xxx',
'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx',
'version': 'xxx',
}
cmd = self._run(metadata)
assert cmd._warnings == 0
# now with the strict mode, we should
# get an error if there are missing metadata
with pytest.raises(DistutilsSetupError):
self._run({}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=True)
assert cmd._warnings == 0
# now a test with non-ASCII characters
metadata = {
'url': 'xxx',
'author': '\u00c9ric',
'author_email': 'xxx',
'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df',
}
cmd = self._run(metadata)
assert cmd._warnings == 0
def test_check_author_maintainer(self):
for kind in ("author", "maintainer"):
# ensure no warning when author_email or maintainer_email is given
# (the spec allows these fields to take the form "Name <email>")
metadata = {
'url': 'xxx',
kind + '_email': 'Name <name@email.com>',
'name': 'xxx',
'version': 'xxx',
}
cmd = self._run(metadata)
assert cmd._warnings == 0
# the check should not warn if only email is given
metadata[kind + '_email'] = 'name@email.com'
cmd = self._run(metadata)
assert cmd._warnings == 0
# the check should not warn if only the name is given
metadata[kind] = "Name"
del metadata[kind + '_email']
cmd = self._run(metadata)
assert cmd._warnings == 0
def test_check_document(self):
pytest.importorskip('docutils')
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
assert len(msgs) == 1
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
assert len(msgs) == 0
def test_check_restructuredtext(self):
pytest.importorskip('docutils')
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
assert cmd._warnings == 1
# let's see if we have an error with strict=True
metadata = {
'url': 'xxx',
'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx',
'version': 'xxx',
'long_description': broken_rest,
}
with pytest.raises(DistutilsSetupError):
self._run(metadata, **{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=True, restructuredtext=True)
assert cmd._warnings == 0
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=True, restructuredtext=True)
assert cmd._warnings == 0
def test_check_restructuredtext_with_syntax_highlight(self):
pytest.importorskip('docutils')
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = [
textwrap.dedent(
"""\
Here's some code:
.. code:: python
def foo():
pass
"""
),
textwrap.dedent(
"""\
Here's some code:
.. code-block:: python
def foo():
pass
"""
),
]
for rest_with_code in example_rst_docs:
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
assert len(msgs) == 0
else:
assert len(msgs) == 1
assert (
str(msgs[0][1])
== 'Cannot analyze code. Pygments package not found.'
)
def test_check_all(self):
with pytest.raises(DistutilsSetupError):
self._run({}, **{'strict': 1, 'restructuredtext': 1})
| TestCheck |
python | getsentry__sentry | tests/sentry/integrations/bitbucket/test_webhook.py | {
"start": 741,
"end": 2479
} | class ____(APITestCase):
endpoint = "sentry-extensions-bitbucket-webhook"
def setUp(self) -> None:
super().setUp()
project = self.project # force creation
self.organization_id = project.organization.id
def send_webhook(self) -> None:
self.get_success_response(
self.organization_id,
raw_data=PUSH_EVENT_EXAMPLE,
extra_headers=dict(
HTTP_X_EVENT_KEY="repo:push",
REMOTE_ADDR=BITBUCKET_IP,
),
status_code=204,
)
def assert_commit(self) -> None:
commit_list = list(
Commit.objects.filter(organization_id=self.organization_id)
.select_related("author")
.order_by("-date_added")
)
assert len(commit_list) == 1
commit = commit_list[0]
assert commit.key == "e0e377d186e4f0e937bdb487a23384fe002df649"
assert commit.message == "README.md edited online with Bitbucket"
assert commit.author is not None
assert commit.author.name == "Max Bittker"
assert commit.author.email == "max@getsentry.com"
assert commit.author.external_id is None
assert commit.date_added == datetime(2017, 5, 24, 1, 5, 47, tzinfo=timezone.utc)
def create_repository(self, **kwargs: Any) -> Repository:
return Repository.objects.create(
**{
**dict(
organization_id=self.organization_id,
external_id="{c78dfb25-7882-4550-97b1-4e0d38f32859}",
provider=PROVIDER_NAME,
name="maxbittker/newsdiffs",
),
**kwargs,
}
)
| WebhookBaseTest |
python | pypa__setuptools | setuptools/_distutils/tests/test_bdist_rpm.py | {
"start": 902,
"end": 3932
} | class ____(
support.TempdirManager,
):
@mac_woes
@requires_zlib()
@pytest.mark.skipif("not shutil.which('rpm')")
@pytest.mark.skipif("not shutil.which('rpmbuild')")
def test_quiet(self):
# let's create a package
tmp_dir = self.mkdtemp()
os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({
'name': 'foo',
'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx',
'author': 'xxx',
'author_email': 'xxx',
})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_rpm(dist)
cmd.fix_python = True
# running in quiet mode
cmd.quiet = True
cmd.ensure_finalized()
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
assert 'foo-0.1-1.noarch.rpm' in dist_created
# bug #2945: upload ignores bdist_rpm files
assert ('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm') in dist.dist_files
assert ('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm') in dist.dist_files
@mac_woes
@requires_zlib()
# https://bugs.python.org/issue1533164
@pytest.mark.skipif("not shutil.which('rpm')")
@pytest.mark.skipif("not shutil.which('rpmbuild')")
def test_no_optimize_flag(self):
# let's create a package that breaks bdist_rpm
tmp_dir = self.mkdtemp()
os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({
'name': 'foo',
'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx',
'author': 'xxx',
'author_email': 'xxx',
})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_rpm(dist)
cmd.fix_python = True
cmd.quiet = True
cmd.ensure_finalized()
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
assert 'foo-0.1-1.noarch.rpm' in dist_created
# bug #2945: upload ignores bdist_rpm files
assert ('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm') in dist.dist_files
assert ('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm') in dist.dist_files
os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
| TestBuildRpm |
python | huggingface__transformers | src/transformers/models/gemma3n/modular_gemma3n.py | {
"start": 105907,
"end": 119160
} | class ____(PaliGemmaModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Gemma3nConfig):
super().__init__(config)
del self.multi_modal_projector # Replaced by Gemma3nVisionEmbedder
del self.text_config_dtype
self.vocab_size_per_layer_input = config.text_config.vocab_size_per_layer_input
self.audio_tower = AutoModel.from_config(config.audio_config)
self.embed_vision = Gemma3nMultimodalEmbedder(config.vision_config, config.text_config)
self.embed_audio = Gemma3nMultimodalEmbedder(config.audio_config, config.text_config)
def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor:
"""
Projects the last hidden state from the vision model into language model space.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_outputs = self.vision_tower(
pixel_values=pixel_values, do_pooling=False, return_dict=True
).last_hidden_state
# Convert from (batch, channels, height, width) to (batch, height * width, channels) where:
# height == width and height * width == Gemma3nConfig.vision_soft_tokens_per_image.
vision_outputs = vision_outputs.reshape(
vision_outputs.shape[0],
self.config.vision_config.hidden_size,
self.config.vision_soft_tokens_per_image,
).permute(0, 2, 1)
# Normalize and embed the soft tokens into language model space.
vision_outputs *= self.config.vision_config.hidden_size**0.5
return self.embed_vision(inputs_embeds=vision_outputs)
def get_placeholder_mask(
self,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_features: Optional[torch.FloatTensor] = None,
audio_features: Optional[torch.FloatTensor] = None,
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_audio_mask = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
)
).all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_audio_mask = input_ids == self.config.audio_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0] * image_features.shape[1]}"
)
n_audio_tokens = special_audio_mask.sum()
special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if audio_features is not None and inputs_embeds[special_audio_mask].numel() != audio_features.numel():
raise ValueError(
f"Audio features and image tokens do not match: tokens: {n_audio_tokens}, features {audio_features.shape[0] * audio_features.shape[1]}"
)
return special_image_mask, special_audio_mask
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None, # text inputs
pixel_values: Optional[torch.FloatTensor] = None, # vision inputs
input_features: Optional[torch.FloatTensor] = None, # audio inputs
attention_mask: Optional[torch.Tensor] = None,
input_features_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
**lm_kwargs,
) -> Gemma3nCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Gemma3nForConditionalGeneration
>>> model = Gemma3nForConditionalGeneration.from_pretrained("google/gemma3n2-3b-mix-224")
>>> processor = AutoProcessor.from_pretrained("google/gemma3n2-3b-mix-224")
>>> prompt = "Where is the cat standing?"
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs,)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Where is the cat standing?\nsnow"
```
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None:
inputs_embeds = self.get_input_embeddings()(input_ids)
# Prepare per-layer inputs from inputs_ids
per_layer_inputs_mask = torch.logical_and(input_ids >= 0, input_ids < self.vocab_size_per_layer_input)
per_layer_inputs_tokens = torch.where(per_layer_inputs_mask, input_ids, torch.zeros_like(input_ids))
per_layer_inputs = self.language_model.get_per_layer_inputs(per_layer_inputs_tokens)
# Handle vision tokens (>= embed_vision.vocab_offset and < embed_audio.vocab_offset)
vision_mask = torch.logical_and(
input_ids >= self.embed_vision.vocab_offset, input_ids < self.embed_audio.vocab_offset
)
dummy_vision_token_id = self.embed_vision.vocab_offset + self.embed_vision.vocab_size - 1
vision_input_ids = torch.where(vision_mask, input_ids, dummy_vision_token_id).to(inputs_embeds.device)
vision_embeds = self.embed_vision(input_ids=vision_input_ids)
vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
expanded_vision_mask = vision_mask.unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = torch.where(expanded_vision_mask, vision_embeds, inputs_embeds)
# Handle audio tokens (>= embed_audio.vocab_offset)
audio_mask = input_ids >= self.embed_audio.vocab_offset
dummy_audio_token_id = self.embed_audio.vocab_offset + self.embed_audio.vocab_size - 1
audio_input_ids = torch.where(audio_mask, input_ids, dummy_audio_token_id).to(inputs_embeds.device)
audio_embeds = self.embed_audio(input_ids=audio_input_ids)
audio_embeds = audio_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
expanded_audio_mask = audio_mask.unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = torch.where(expanded_audio_mask, audio_embeds, inputs_embeds)
else:
per_layer_inputs = None
# Merge text and images
if pixel_values is not None:
image_features = self.get_image_features(pixel_values)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
# Merge text and audio
if input_features is not None and input_features_mask is not None:
audio_features, audio_mask = self.get_audio_features(input_features, ~input_features_mask)
# The Gemma3nProcessor expects all audio will be 30s in length and inserts 188 audio soft tokens into the
# text to account for this. However, the audio preprocessing and encoder do not gurarantee they will
# produce 188 soft tokens; they will produce at most that many tokens, but they may produce fewer tokens
# depending on the length of the longest audio input in the batch. When we encounter this situation, we pad
# the audio feature out to 188 soft tokens with the emebedding of the last token in the embed_audio vocab.
audio_padding_toks = torch.tensor([[self.vocab_size - 1]], dtype=torch.long, device=audio_features.device)
audio_padding_embs = self.embed_audio(input_ids=audio_padding_toks)
audio_features = torch.where(audio_mask.unsqueeze(-1), audio_padding_embs, audio_features)
audio_batch_size, audio_seq_len, audio_embed_dim = audio_features.shape
extra_padding_tokens = self.config.audio_soft_tokens_per_image - audio_seq_len
extra_padding_features = audio_padding_embs.expand(audio_batch_size, extra_padding_tokens, audio_embed_dim)
audio_features = torch.cat((audio_features, extra_padding_features), dim=1)
audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
_, special_audio_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, audio_features=audio_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_audio_mask, audio_features)
outputs = self.language_model(
input_ids=None,
per_layer_inputs=per_layer_inputs,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**lm_kwargs,
)
return Gemma3nModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
audio_hidden_states=audio_features if input_features is not None else None,
)
def get_audio_features(
self, input_features: torch.Tensor, input_features_mask: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Projects the last hidden state from the audio encoder into language model space.
Args:
input_features (`torch.FloatTensor]` of shape `(num_images, seq_length, num_features)`):
The tensors corresponding to the input audio.
input_features_mask (`torch.FloatTensor]` of shape `(num_images, seq_length)`):
The attention mask for the input audio.
Returns:
audio_features (`torch.Tensor`): Audio feature tensor of shape `(num_images, audio_length, embed_dim)`).
"""
audio_outputs, audio_mask = self.audio_tower(input_features, input_features_mask)
return self.embed_audio(inputs_embeds=audio_outputs), audio_mask
@auto_docstring(
custom_intro="""
The base Gemma 3n model comprising a vision backbone, an audio backbone, a language model, and a language modeling
head.
"""
)
| Gemma3nModel |
python | pytest-dev__pytest-django | pytest_django/plugin.py | {
"start": 25454,
"end": 28231
} | class ____:
"""Manager for django.db.backends.base.base.BaseDatabaseWrapper.
This is the object returned by django_db_blocker.
"""
def __init__(self, *, _ispytest: bool = False) -> None:
if not _ispytest: # pragma: no cover
raise TypeError(
"The DjangoDbBlocker constructor is private. "
"use the django_db_blocker fixture instead."
)
self._history = [] # type: ignore[var-annotated]
self._real_ensure_connection = None
@property
def _dj_db_wrapper(self) -> django.db.backends.base.base.BaseDatabaseWrapper:
from django.db.backends.base.base import BaseDatabaseWrapper
# The first time the _dj_db_wrapper is accessed, save a reference to the
# real implementation.
if self._real_ensure_connection is None:
self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection
return BaseDatabaseWrapper
def _save_active_wrapper(self) -> None:
self._history.append(self._dj_db_wrapper.ensure_connection)
def _blocking_wrapper(*args: Any, **kwargs: Any) -> NoReturn: # noqa: ARG002
__tracebackhide__ = True
raise RuntimeError(
"Database access not allowed, "
'use the "django_db" mark, or the '
'"db" or "transactional_db" fixtures to enable it.'
)
def unblock(self) -> AbstractContextManager[None]:
"""Enable access to the Django database."""
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._real_ensure_connection
return _DatabaseBlockerContextManager(self)
def block(self) -> AbstractContextManager[None]:
"""Disable access to the Django database."""
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._blocking_wrapper
return _DatabaseBlockerContextManager(self)
def restore(self) -> None:
"""Undo a previous call to block() or unblock().
Consider using block() and unblock() as context managers instead of
manually calling restore().
"""
self._dj_db_wrapper.ensure_connection = self._history.pop()
@property
def is_active(self) -> bool:
"""Whether a block() or unblock() is currently active."""
return bool(self._history)
# On Config.stash.
blocking_manager_key = pytest.StashKey[DjangoDbBlocker]()
def validate_urls(marker: pytest.Mark) -> list[str]:
"""Validate the urls marker.
It checks the signature and creates the `urls` attribute on the
marker which will have the correct value.
"""
def apifun(urls: list[str]) -> list[str]:
return urls
return apifun(*marker.args, **marker.kwargs)
| DjangoDbBlocker |
python | kubernetes-client__python | kubernetes/client/models/v1_volume_attachment_list.py | {
"start": 383,
"end": 7049
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1VolumeAttachment]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1VolumeAttachmentList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1VolumeAttachmentList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1VolumeAttachmentList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1VolumeAttachmentList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1VolumeAttachmentList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1VolumeAttachmentList. # noqa: E501
items is the list of VolumeAttachments # noqa: E501
:return: The items of this V1VolumeAttachmentList. # noqa: E501
:rtype: list[V1VolumeAttachment]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1VolumeAttachmentList.
items is the list of VolumeAttachments # noqa: E501
:param items: The items of this V1VolumeAttachmentList. # noqa: E501
:type: list[V1VolumeAttachment]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1VolumeAttachmentList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1VolumeAttachmentList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1VolumeAttachmentList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1VolumeAttachmentList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1VolumeAttachmentList. # noqa: E501
:return: The metadata of this V1VolumeAttachmentList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1VolumeAttachmentList.
:param metadata: The metadata of this V1VolumeAttachmentList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VolumeAttachmentList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1VolumeAttachmentList):
return True
return self.to_dict() != other.to_dict()
| V1VolumeAttachmentList |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 65659,
"end": 65814
} | class ____:
xlGenerateTableRefA1 = 0 # from enum XlGenerateTableRefs
xlGenerateTableRefStruct = 1 # from enum XlGenerateTableRefs
| GenerateTableRefs |
python | kamyu104__LeetCode-Solutions | Python/domino-and-tromino-tiling.py | {
"start": 1034,
"end": 1665
} | class ____(object):
def numTilings(self, N):
"""
:type N: int
:rtype: int
"""
# Prove:
# dp[n] = dp[n-1](|) + dp[n-2](=) + 2*(dp[n-3](「」) + ... + d[0](「 = ... = 」))
# = dp[n-1] + dp[n-2] + dp[n-3] + dp[n-3] + 2*(dp[n-4] + ... + d[0])
# = dp[n-1] + dp[n-3] + (dp[n-2] + dp[n-3] + 2*(dp[n-4] + ... + d[0])
# = dp[n-1] + dp[n-3] + dp[n-1]
# = 2*dp[n-1] + dp[n-3]
M = int(1e9+7)
dp = [1, 1, 2]
for i in xrange(3, N+1):
dp[i%3] = (2*dp[(i-1)%3]%M + dp[(i-3)%3])%M
return dp[N%3]
| Solution2 |
python | google__pytype | pytype/tools/xref/indexer.py | {
"start": 7766,
"end": 8132
} | class ____:
"""A location of a symbol definition.
Attributes:
def_id: The definition id (scope + name)
location: The location of the definition in the source code.
Note that a single definition can have multiple locations, for symbols that
are redefined in the code.
"""
def_id: str
location: source.Location
@dataclasses.dataclass
| DefLocation |
python | doocs__leetcode | solution/1900-1999/1952.Three Divisors/Solution2.py | {
"start": 0,
"end": 229
} | class ____:
def isThree(self, n: int) -> bool:
cnt = 0
i = 1
while i <= n // i:
if n % i == 0:
cnt += 1 if i == n // i else 2
i += 1
return cnt == 3
| Solution |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 52262,
"end": 52964
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.align.modeling_align.AlignTextLayer with Align->Clap
| ClapTextOutput |
python | pennersr__django-allauth | allauth/socialaccount/providers/amazon/provider.py | {
"start": 219,
"end": 268
} | class ____(ProviderAccount):
pass
| AmazonAccount |
python | huggingface__transformers | src/transformers/trainer_callback.py | {
"start": 12795,
"end": 18318
} | class ____:
# no-format
"""
A class for objects that will inspect the state of the training loop at some events and take some decisions. At
each of those events the following arguments are available:
Args:
args ([`TrainingArguments`]):
The training arguments used to instantiate the [`Trainer`].
state ([`TrainerState`]):
The current state of the [`Trainer`].
control ([`TrainerControl`]):
The object that is returned to the [`Trainer`] and can be used to make some decisions.
model ([`PreTrainedModel`] or `torch.nn.Module`):
The model being trained.
processing_class ([`PreTrainedTokenizer` or `BaseImageProcessor` or `ProcessorMixin` or `FeatureExtractionMixin`]):
The processing class used for encoding the data. Can be a tokenizer, a processor, an image processor or a feature extractor.
optimizer (`torch.optim.Optimizer`):
The optimizer used for the training steps.
lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`):
The scheduler used for setting the learning rate.
train_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for training.
eval_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for evaluation.
metrics (`dict[str, float]`):
The metrics computed by the last evaluation phase.
Those are only accessible in the event `on_evaluate`.
logs (`dict[str, float]`):
The values to log.
Those are only accessible in the event `on_log`.
The `control` object is the only one that can be changed by the callback, in which case the event that changes it
should return the modified version.
The argument `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`.
You can unpack the ones you need in the signature of the event using them. As an example, see the code of the
simple [`~transformers.PrinterCallback`].
Example:
```python
class PrinterCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
```"""
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of the initialization of the [`Trainer`].
"""
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of training.
"""
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of training.
"""
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of an epoch.
"""
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an epoch.
"""
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called before the optimizer step but after gradient clipping. Useful for monitoring gradients.
"""
def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after the optimizer step but before gradients are zeroed out. Useful for monitoring gradients.
"""
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an substep during gradient accumulation.
"""
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after an evaluation phase.
"""
def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs):
"""
Event called after a successful prediction.
"""
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a checkpoint save.
"""
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after logging the last logs.
"""
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a prediction step.
"""
| TrainerCallback |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_wmt.py | {
"start": 13916,
"end": 19182
} | class ____(nn.Module):
"""Transformer Model for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
def setup(self):
config = self.config
if config.share_embeddings:
if config.output_vocab_size is not None:
assert config.output_vocab_size == config.vocab_size, (
"can't share embedding with different vocab sizes.")
self.shared_embedding = nn.Embed(
num_embeddings=config.vocab_size,
features=config.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
self.shared_embedding = None
self.encoder = Encoder(
config=config, shared_embedding=self.shared_embedding)
self.decoder = Decoder(
config=config, shared_embedding=self.shared_embedding)
def encode(self,
inputs,
inputs_positions=None,
inputs_segmentation=None):
"""Applies Transformer encoder-branch on the inputs.
Args:
inputs: input data.
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
Returns:
encoded feature array from the transformer encoder.
"""
config = self.config
# Make padding attention mask.
encoder_mask = nn.make_attention_mask(
inputs > 0, inputs > 0, dtype=config.dtype)
# Add segmentation block-diagonal attention mask if using segmented data.
if inputs_segmentation is not None:
encoder_mask = nn.combine_masks(
encoder_mask,
nn.make_attention_mask(
inputs_segmentation,
inputs_segmentation,
jnp.equal,
dtype=config.dtype))
return self.encoder(
inputs,
inputs_positions=inputs_positions,
encoder_mask=encoder_mask)
def decode(self,
encoded,
inputs, # only needed for masks
targets,
targets_positions=None,
inputs_segmentation=None,
targets_segmentation=None):
"""Applies Transformer decoder-branch on encoded-input and target.
Args:
encoded: encoded input data from encoder.
inputs: input data (only needed for masking).
targets: target data.
targets_positions: target subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
targets_segmentation: target segmentation info for packed examples.
Returns:
logits array from transformer decoder.
"""
config = self.config
# Make padding attention masks.
if config.decode:
# for fast autoregressive decoding only a special encoder-decoder mask is used
decoder_mask = None
encoder_decoder_mask = nn.make_attention_mask(
jnp.ones_like(targets) > 0, inputs > 0, dtype=config.dtype)
else:
decoder_mask = nn.combine_masks(
nn.make_attention_mask(targets > 0, targets > 0, dtype=config.dtype),
nn.make_causal_mask(targets, dtype=config.dtype))
encoder_decoder_mask = nn.make_attention_mask(
targets > 0, inputs > 0, dtype=config.dtype)
# Add segmentation block-diagonal attention masks if using segmented data.
if inputs_segmentation is not None:
decoder_mask = nn.combine_masks(
decoder_mask,
nn.make_attention_mask(
targets_segmentation,
targets_segmentation,
jnp.equal,
dtype=config.dtype))
encoder_decoder_mask = nn.combine_masks(
encoder_decoder_mask,
nn.make_attention_mask(
targets_segmentation,
inputs_segmentation,
jnp.equal,
dtype=config.dtype))
logits = self.decoder(
encoded,
targets,
targets_positions=targets_positions,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask)
return logits.astype(self.config.dtype)
def __call__(self,
inputs,
targets,
inputs_positions=None,
targets_positions=None,
inputs_segmentation=None,
targets_segmentation=None):
"""Applies Transformer model on the inputs.
Args:
inputs: input data.
targets: target data.
inputs_positions: input subsequence positions for packed examples.
targets_positions: target subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
targets_segmentation: target segmentation info for packed examples.
Returns:
logits array from full transformer.
"""
encoded = self.encode(inputs,
inputs_positions=inputs_positions,
inputs_segmentation=inputs_segmentation)
return self.decode(encoded,
inputs, # only used for masks
targets,
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentation,
targets_segmentation=targets_segmentation)
| Transformer |
python | pytest-dev__pytest | testing/test_junitxml.py | {
"start": 4532,
"end": 5936
} | class ____:
"""minimal test to increase coverage for methods that are used in debugging"""
@pytest.fixture
def document(self) -> DomDocument:
doc = minidom.parseString("""
<root>
<item name="a"></item>
<item name="b"></item>
</root>
""")
return DomDocument(doc)
def test_uc_root(self, document: DomDocument) -> None:
assert document.get_unique_child.tag == "root"
def test_node_assert_attr(self, document: DomDocument) -> None:
item = document.get_first_by_tag("item")
item.assert_attr(name="a")
with pytest.raises(AssertionError):
item.assert_attr(missing="foo")
def test_node_getitem(self, document: DomDocument) -> None:
item = document.get_first_by_tag("item")
assert item["name"] == "a"
with pytest.raises(KeyError, match="missing"):
item["missing"]
def test_node_get_first_lookup(self, document: DomDocument) -> None:
with pytest.raises(LookupError, match="missing"):
document.get_first_by_tag("missing")
def test_node_repr(self, document: DomDocument) -> None:
item = document.get_first_by_tag("item")
assert repr(item) == item.toxml()
assert item.toxml() == '<item name="a"/>'
parametrize_families = pytest.mark.parametrize("xunit_family", ["xunit1", "xunit2"])
| TestJunitHelpers |
python | yaml__pyyaml | lib/yaml/cyaml.py | {
"start": 891,
"end": 1096
} | class ____(CParser, UnsafeConstructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
UnsafeConstructor.__init__(self)
Resolver.__init__(self)
| CUnsafeLoader |
python | sqlalchemy__sqlalchemy | test/engine/test_transaction.py | {
"start": 996,
"end": 38434
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.fixture
def local_connection(self):
with testing.db.connect() as conn:
yield conn
def test_commits(self, local_connection):
users = self.tables.users
connection = local_connection
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
transaction.commit()
transaction = connection.begin()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 3
transaction.commit()
connection.close()
def test_rollback(self, local_connection):
"""test a basic rollback"""
users = self.tables.users
connection = local_connection
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
transaction.rollback()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 0
def test_raise(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
try:
connection.execute(
users.insert(), dict(user_id=1, user_name="user1")
)
connection.execute(
users.insert(), dict(user_id=2, user_name="user2")
)
connection.execute(
users.insert(), dict(user_id=1, user_name="user3")
)
transaction.commit()
assert False
except Exception as e:
print("Exception: ", e)
transaction.rollback()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 0
def test_rollback_end_ctx_manager_autocommit(self, local_connection):
m1 = mock.Mock()
event.listen(local_connection, "rollback", m1.rollback)
event.listen(local_connection, "commit", m1.commit)
with local_connection.begin() as trans:
assert local_connection.in_transaction()
trans.rollback()
assert not local_connection.in_transaction()
# previously, would be subject to autocommit.
# now it raises
with expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside context manager. "
"Please complete the context manager before emitting "
"further commands.",
):
local_connection.execute(select(1))
assert not local_connection.in_transaction()
@testing.combinations((True,), (False,), argnames="roll_back_in_block")
@testing.requires.savepoints
def test_ctxmanager_rolls_back_savepoint(
self, local_connection, roll_back_in_block
):
m1 = mock.Mock()
event.listen(
local_connection, "rollback_savepoint", m1.rollback_savepoint
)
event.listen(local_connection, "rollback", m1.rollback)
event.listen(local_connection, "commit", m1.commit)
with local_connection.begin() as trans:
with expect_raises_message(Exception, "test"):
with local_connection.begin_nested() as nested_trans:
if roll_back_in_block:
nested_trans.rollback()
if 1 == 1:
raise Exception("test")
assert not nested_trans.is_active
assert nested_trans._deactivated_from_connection
assert trans.is_active
assert local_connection.in_transaction()
assert not trans._deactivated_from_connection
eq_(
m1.mock_calls,
[
mock.call.rollback_savepoint(
local_connection, mock.ANY, mock.ANY
),
mock.call.commit(local_connection),
],
)
def test_deactivated_warning_straight(self, local_connection):
with expect_warnings(
"transaction already deassociated from connection"
):
trans = local_connection.begin()
trans.rollback()
trans.rollback()
@testing.requires.savepoints
def test_deactivated_savepoint_warning_straight(self, local_connection):
with expect_warnings(
"nested transaction already deassociated from connection"
):
with local_connection.begin():
savepoint = local_connection.begin_nested()
savepoint.rollback()
savepoint.rollback()
def test_commit_fails_flat(self, local_connection):
connection = local_connection
t1 = connection.begin()
with mock.patch.object(
connection,
"_commit_impl",
mock.Mock(side_effect=exc.DBAPIError("failure", None, None, None)),
):
assert_raises_message(exc.DBAPIError, r"failure", t1.commit)
assert not t1.is_active
t1.rollback() # no error
def test_commit_fails_ctxmanager(self, local_connection):
connection = local_connection
transaction = [None]
def go():
with mock.patch.object(
connection,
"_commit_impl",
mock.Mock(
side_effect=exc.DBAPIError("failure", None, None, None)
),
):
with connection.begin() as t1:
transaction[0] = t1
assert_raises_message(exc.DBAPIError, r"failure", go)
t1 = transaction[0]
assert not t1.is_active
with expect_warnings(
"transaction already deassociated from connection"
):
t1.rollback() # no error
@testing.requires.savepoints_w_release
def test_savepoint_rollback_fails_flat(self, local_connection):
connection = local_connection
t1 = connection.begin()
s1 = connection.begin_nested()
# force the "commit" of the savepoint that occurs
# when the "with" block fails, e.g.
# the RELEASE, to fail, because the savepoint is already
# released.
connection.dialect.do_release_savepoint(connection, s1._savepoint)
assert_raises_message(
exc.DBAPIError, r".*SQL\:.*ROLLBACK TO SAVEPOINT", s1.rollback
)
assert not s1.is_active
with testing.expect_warnings("nested transaction already"):
s1.rollback() # no error (though it warns)
# this test was previously calling "commit", but note relies on
# buggy behavior in PostgreSQL as the transaction block is in fact
# aborted. pg8000 enforces this on the client as of 1.29
t1.rollback() # no error
@testing.requires.savepoints_w_release
def test_savepoint_release_fails_flat(self):
with testing.db.connect() as connection:
t1 = connection.begin()
s1 = connection.begin_nested()
# force the "commit" of the savepoint that occurs
# when the "with" block fails, e.g.
# the RELEASE, to fail, because the savepoint is already
# released.
connection.dialect.do_release_savepoint(connection, s1._savepoint)
assert_raises_message(
exc.DBAPIError, r".*SQL\:.*RELEASE SAVEPOINT", s1.commit
)
assert not s1.is_active
s1.rollback() # no error. prior to 1.4 this would try to rollback
# this test was previously calling "commit", but note relies on
# buggy behavior in PostgreSQL as the transaction block is in fact
# aborted. pg8000 enforces this on the client as of 1.29
t1.rollback() # no error
@testing.requires.savepoints_w_release
def test_savepoint_release_fails_ctxmanager(self, local_connection):
connection = local_connection
connection.begin()
savepoint = [None]
def go():
with connection.begin_nested() as sp:
savepoint[0] = sp
# force the "commit" of the savepoint that occurs
# when the "with" block fails, e.g.
# the RELEASE, to fail, because the savepoint is already
# released.
connection.dialect.do_release_savepoint(
connection, sp._savepoint
)
# prior to SQLAlchemy 1.4, the above release would fail
# and then the savepoint would try to rollback, and that failed
# also, causing a long exception chain that under Python 2
# was particularly hard to diagnose, leading to issue
# #2696 which eventually impacted Openstack, and we
# had to add warnings that show what the "context" for an
# exception was. The SQL for the exception was
# ROLLBACK TO SAVEPOINT, and up the exception chain would be
# the RELEASE failing.
#
# now, when the savepoint "commit" fails, it sets itself as
# inactive. so it does not try to rollback and it cleans
# itself out appropriately.
#
exc_ = assert_raises_message(
exc.DBAPIError, r".*SQL\:.*RELEASE SAVEPOINT", go
)
savepoint = savepoint[0]
assert not savepoint.is_active
# ensure cause comes from the DBAPI
assert isinstance(exc_.__cause__, testing.db.dialect.dbapi.Error)
def test_retains_through_options(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
conn2 = connection.execution_options(dummy=True)
conn2.execute(users.insert(), dict(user_id=2, user_name="user2"))
transaction.rollback()
eq_(
connection.exec_driver_sql("select count(*) from users").scalar(),
0,
)
def test_ctxmanager_interface(self, local_connection):
# a legacy test, adapted for 2.x style, was called
# "test_with_interface". this is likely an early test for when
# the "with" construct was first added.
connection = local_connection
users = self.tables.users
trans = connection.begin()
with trans:
connection.execute(
users.insert(), dict(user_id=1, user_name="user1")
)
connection.execute(
users.insert(), dict(user_id=2, user_name="user2")
)
assert trans.is_active
assert not trans.is_active
eq_(
connection.exec_driver_sql("select count(*) from users").scalar(),
2,
)
connection.rollback()
def test_close(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
assert connection.in_transaction()
transaction.commit()
assert not connection.in_transaction()
result = connection.exec_driver_sql("select * from users")
eq_(len(result.fetchall()), 3)
def test_close2(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
assert connection.in_transaction()
transaction.close()
assert not connection.in_transaction()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 0
@testing.requires.independent_connections
def test_no_rollback_in_deactive(self, local_connection):
"""test #7388"""
def fail(*arg, **kw):
raise BaseException("some base exception")
with mock.patch.object(testing.db.dialect, "do_commit", fail):
with expect_raises_message(BaseException, "some base exception"):
with local_connection.begin():
pass
@testing.requires.independent_connections
@testing.requires.savepoints
def test_no_rollback_in_deactive_savepoint(self, local_connection):
"""test #7388"""
def fail(*arg, **kw):
raise BaseException("some base exception")
with mock.patch.object(
testing.db.dialect, "do_release_savepoint", fail
):
with local_connection.begin():
with expect_raises_message(
BaseException, "some base exception"
):
with local_connection.begin_nested():
pass
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
trans2 = connection.begin_nested()
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
trans2.rollback()
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
transaction.commit()
eq_(
connection.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,), (3,)],
)
@testing.requires.savepoints
def test_nested_subtransaction_commit(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
trans2 = connection.begin_nested()
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
trans2.commit()
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
transaction.commit()
eq_(
connection.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,), (2,), (3,)],
)
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin_twophase()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
transaction.prepare()
transaction.commit()
transaction = connection.begin_twophase()
connection.execute(users.insert(), dict(user_id=2, user_name="user2"))
transaction.commit()
transaction.close()
transaction = connection.begin_twophase()
connection.execute(users.insert(), dict(user_id=3, user_name="user3"))
transaction.rollback()
transaction = connection.begin_twophase()
connection.execute(users.insert(), dict(user_id=4, user_name="user4"))
transaction.prepare()
transaction.rollback()
transaction.close()
eq_(
connection.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,), (2,)],
)
@testing.requires.two_phase_transactions
@testing.requires.two_phase_recovery
@testing.variation("commit", [True, False])
def test_two_phase_recover(self, commit):
users = self.tables.users
# 2020, still can't get this to work w/ modern MySQL or MariaDB.
# the XA RECOVER comes back as bytes, OK, convert to string,
# XA COMMIT then says Unknown XID. Also, the drivers seem to be
# killing off the XID if I use the connection.invalidate() before
# trying to access in another connection. Not really worth it
# unless someone wants to step through how mysqlclient / pymysql
# support this correctly.
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(
users.insert(), dict(dict(user_id=1, user_name="user1"))
)
transaction.prepare()
connection.invalidate()
with testing.db.connect() as connection2:
eq_(
connection2.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[],
)
# recover_twophase needs to be run in a new transaction
with testing.db.connect() as connection3:
# oracle transactions can't be recovered for commit after...
# about 1 second? OK
with testing.skip_if_timeout(
0.50,
cleanup=(
lambda: connection3.rollback_prepared(
transaction.xid, recover=True
)
),
):
recoverables = connection3.recover_twophase()
assert transaction.xid in recoverables
if commit:
connection3.commit_prepared(transaction.xid, recover=True)
res = [(1,)]
else:
connection3.rollback_prepared(transaction.xid, recover=True)
res = []
stmt = select(users.c.user_id).order_by(users.c.user_id)
eq_(connection3.execute(stmt).fetchall(), res)
@testing.requires.two_phase_transactions
def test_multiple_two_phase(self, local_connection):
conn = local_connection
users = self.tables.users
xa = conn.begin_twophase()
conn.execute(users.insert(), dict(user_id=1, user_name="user1"))
xa.prepare()
xa.commit()
xa = conn.begin_twophase()
conn.execute(users.insert(), dict(user_id=2, user_name="user2"))
xa.prepare()
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), dict(user_id=3, user_name="user3"))
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), dict(user_id=4, user_name="user4"))
xa.prepare()
xa.commit()
result = conn.execute(
select(users.c.user_name).order_by(users.c.user_id)
)
eq_(result.fetchall(), [("user1",), ("user4",)])
@testing.requires.two_phase_transactions
def test_reset_rollback_two_phase_no_rollback(self):
# test [ticket:2907], essentially that the
# TwoPhaseTransaction is given the job of "reset on return"
# so that picky backends like MySQL correctly clear out
# their state when a connection is closed without handling
# the transaction explicitly.
users = self.tables.users
eng = testing_engine()
# MySQL raises if you call straight rollback() on
# a connection with an XID present
@event.listens_for(eng, "invalidate")
def conn_invalidated(dbapi_con, con_record, exception):
if exception is not None:
dbapi_con.close()
raise exception
with eng.connect() as conn:
rec = conn.connection._connection_record
raw_dbapi_con = rec.dbapi_connection
conn.begin_twophase()
conn.execute(users.insert(), dict(user_id=1, user_name="user1"))
assert rec.dbapi_connection is raw_dbapi_con
with eng.connect() as conn:
result = conn.execute(
select(users.c.user_name).order_by(users.c.user_id)
)
eq_(result.fetchall(), [])
def test_interrupt_ctxmanager_engine(self, trans_ctx_manager_fixture):
fn = trans_ctx_manager_fixture
fn(testing.db, trans_on_subject=False, execute_on_subject=False)
@testing.combinations((True,), (False,), argnames="trans_on_subject")
def test_interrupt_ctxmanager_connection(
self, trans_ctx_manager_fixture, trans_on_subject
):
fn = trans_ctx_manager_fixture
with testing.db.connect() as conn:
fn(
conn,
trans_on_subject=trans_on_subject,
execute_on_subject=True,
)
def test_autobegin_rollback(self):
users = self.tables.users
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.rollback()
eq_(conn.scalar(select(func.count(1)).select_from(users)), 0)
@testing.requires.autocommit
def test_autocommit_isolation_level(self):
users = self.tables.users
with testing.db.connect().execution_options(
isolation_level="AUTOCOMMIT"
) as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.rollback()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.autocommit
def test_no_autocommit_w_begin(self):
with testing.db.begin() as conn:
assert_raises_message(
exc.InvalidRequestError,
r"This connection has already initialized a SQLAlchemy "
r"Transaction\(\) object via begin\(\) or autobegin; "
r"isolation_level may not be altered unless rollback\(\) or "
r"commit\(\) is called first.",
conn.execution_options,
isolation_level="AUTOCOMMIT",
)
@testing.requires.autocommit
def test_no_autocommit_w_autobegin(self):
with testing.db.connect() as conn:
conn.execute(select(1))
assert_raises_message(
exc.InvalidRequestError,
r"This connection has already initialized a SQLAlchemy "
r"Transaction\(\) object via begin\(\) or autobegin; "
r"isolation_level may not be altered unless rollback\(\) or "
r"commit\(\) is called first.",
conn.execution_options,
isolation_level="AUTOCOMMIT",
)
conn.rollback()
conn.execution_options(isolation_level="AUTOCOMMIT")
def test_autobegin_commit(self):
users = self.tables.users
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
assert conn.in_transaction()
conn.commit()
assert not conn.in_transaction()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
conn.execute(users.insert(), {"user_id": 2, "user_name": "name 2"})
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
assert conn.in_transaction()
conn.rollback()
assert not conn.in_transaction()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
def test_rollback_on_close(self):
canary = mock.Mock()
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
conn.execute(select(1))
assert conn.in_transaction()
eq_(canary.mock_calls, [mock.call(conn)])
def test_no_on_close_no_transaction(self):
canary = mock.Mock()
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
conn.execute(select(1))
conn.rollback()
assert not conn.in_transaction()
eq_(canary.mock_calls, [mock.call(conn)])
def test_rollback_on_exception(self):
canary = mock.Mock()
try:
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
conn.execute(select(1))
assert conn.in_transaction()
raise Exception("some error")
assert False
except:
pass
eq_(canary.mock_calls, [mock.call(conn)])
def test_rollback_on_exception_if_no_trans(self):
canary = mock.Mock()
try:
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
assert not conn.in_transaction()
raise Exception("some error")
assert False
except:
pass
eq_(canary.mock_calls, [])
def test_commit_no_begin(self):
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.commit()
@testing.requires.independent_connections
def test_commit_inactive(self):
with testing.db.connect() as conn:
conn.begin()
conn.invalidate()
assert_raises_message(
exc.InvalidRequestError, "Can't reconnect until", conn.commit
)
@testing.requires.independent_connections
def test_rollback_inactive(self):
users = self.tables.users
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.commit()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
conn.invalidate()
assert_raises_message(
exc.PendingRollbackError,
"Can't reconnect",
conn.execute,
select(1),
)
conn.rollback()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
def test_rollback_no_begin(self):
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.rollback()
def test_rollback_end_ctx_manager(self):
with testing.db.begin() as conn:
assert conn.in_transaction()
conn.rollback()
assert not conn.in_transaction()
def test_rollback_end_ctx_manager_autobegin(self, local_connection):
m1 = mock.Mock()
event.listen(local_connection, "rollback", m1.rollback)
event.listen(local_connection, "commit", m1.commit)
with local_connection.begin() as trans:
assert local_connection.in_transaction()
trans.rollback()
assert not local_connection.in_transaction()
# previously, would be subject to autocommit.
# now it raises
with expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside context manager. "
"Please complete the context manager before emitting "
"further commands.",
):
local_connection.execute(select(1))
assert not local_connection.in_transaction()
@testing.combinations((True,), (False,), argnames="roll_back_in_block")
def test_ctxmanager_rolls_back(self, local_connection, roll_back_in_block):
m1 = mock.Mock()
event.listen(local_connection, "rollback", m1.rollback)
event.listen(local_connection, "commit", m1.commit)
with expect_raises_message(Exception, "test"):
with local_connection.begin() as trans:
if roll_back_in_block:
trans.rollback()
if 1 == 1:
raise Exception("test")
assert not trans.is_active
assert not local_connection.in_transaction()
assert trans._deactivated_from_connection
eq_(m1.mock_calls, [mock.call.rollback(local_connection)])
@testing.requires.savepoints
def test_ctxmanager_autobegins_real_trans_from_nested(
self, local_connection
):
# the legacy version of this test in 1.4
# was test_ctxmanager_commits_real_trans_from_nested
m1 = mock.Mock()
event.listen(
local_connection, "rollback_savepoint", m1.rollback_savepoint
)
event.listen(
local_connection, "release_savepoint", m1.release_savepoint
)
event.listen(local_connection, "rollback", m1.rollback)
event.listen(local_connection, "commit", m1.commit)
event.listen(local_connection, "begin", m1.begin)
event.listen(local_connection, "savepoint", m1.savepoint)
with local_connection.begin_nested() as nested_trans:
pass
assert not nested_trans.is_active
assert nested_trans._deactivated_from_connection
eq_(
m1.mock_calls,
[
mock.call.begin(local_connection),
mock.call.savepoint(local_connection, mock.ANY),
mock.call.release_savepoint(
local_connection, mock.ANY, mock.ANY
),
],
)
def test_explicit_begin(self):
users = self.tables.users
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.begin()
assert conn.in_transaction()
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.commit()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
def test_no_double_begin(self):
with testing.db.connect() as conn:
conn.begin()
assert_raises_message(
exc.InvalidRequestError,
r"This connection has already initialized a SQLAlchemy "
r"Transaction\(\) object via begin\(\) or autobegin; can't "
r"call begin\(\) here unless rollback\(\) or commit\(\) is "
r"called first.",
conn.begin,
)
def test_no_autocommit(self):
users = self.tables.users
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
0,
)
def test_begin_block(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.savepoints
def test_savepoint_one(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
savepoint = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
savepoint.rollback()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.savepoints
def test_savepoint_two(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
savepoint = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
savepoint.commit()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
@testing.requires.savepoints
def test_savepoint_three(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
conn.rollback()
assert not conn.in_transaction()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
0,
)
@testing.requires.savepoints
def test_savepoint_four(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
sp1 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
sp2.rollback()
assert not sp2.is_active
assert sp1.is_active
assert conn.in_transaction()
assert not sp1.is_active
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
@testing.requires.savepoints
def test_savepoint_five(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
sp2.commit()
assert conn.in_transaction()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
3,
)
@testing.requires.savepoints
def test_savepoint_six(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
sp1 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
assert conn._nested_transaction is sp1
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
assert conn._nested_transaction is sp2
sp2.commit()
assert conn._nested_transaction is sp1
sp1.rollback()
assert conn._nested_transaction is None
assert conn.in_transaction()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.savepoints
def test_savepoint_seven(self):
users = self.tables.users
with testing.db.connect() as conn:
trans = conn.begin()
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
sp1 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
assert conn.in_transaction()
trans.close()
assert not sp1.is_active
assert not sp2.is_active
assert not trans.is_active
assert conn._transaction is None
assert conn._nested_transaction is None
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
0,
)
| TransactionTest |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 13206,
"end": 13602
} | class ____:
"""Test es_ES phone number provider methods"""
def test_phone_number(self, faker, num_samples):
pattern: Pattern = re.compile(
r"\+34 ?(?:7[0-4]|[689]\d)\d" r"(?: \d{3} \d{3}|\d{6}| \d{2} \d{2} \d{2})",
)
for _ in range(num_samples):
phone_number = faker.phone_number()
assert pattern.fullmatch(phone_number)
| TestEsEs |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 11484,
"end": 12018
} | class ____(CanvasCase):
def test_apply(self):
for type, attr in [(xmap, 'map'), (xstarmap, 'starmap')]:
args = [(i, i) for i in range(10)]
s = getattr(self.add, attr)(args)
s.type = Mock()
s.apply_async(foo=1)
s.type.apply_async.assert_called_with(
(), {'task': self.add.s(), 'it': args}, foo=1,
route_name=self.add.name,
)
assert type.from_dict(dict(s)) == s
assert repr(s)
| test_xmap_xstarmap |
python | walkccc__LeetCode | solutions/224. Basic Calculator/224.py | {
"start": 0,
"end": 470
} | class ____:
def calculate(self, s: str) -> int:
ans = 0
num = 0
sign = 1
stack = [sign] # stack[-1]: the current environment's sign
for c in s:
if c.isdigit():
num = num * 10 + int(c)
elif c == '(':
stack.append(sign)
elif c == ')':
stack.pop()
elif c == '+' or c == '-':
ans += sign * num
sign = (1 if c == '+' else -1) * stack[-1]
num = 0
return ans + sign * num
| Solution |
python | gevent__gevent | src/gevent/local.py | {
"start": 6424,
"end": 6990
} | class ____(object):
__slots__ = ('key', 'wrthread', 'greenlet_deleted')
def __init__(self, key, wrthread, greenlet_deleted):
self.key = key
self.wrthread = wrthread
self.greenlet_deleted = greenlet_deleted
def __call__(self, _unused):
thread = self.wrthread()
if thread is not None:
try:
unlink = thread.unlink
except AttributeError:
pass
else:
unlink(self.greenlet_deleted)
del thread.__dict__[self.key]
| _local_deleted |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/skills/version_create_params.py | {
"start": 386,
"end": 813
} | class ____(TypedDict, total=False):
files: Optional[SequenceNotStr[FileTypes]]
"""Files to upload for the skill.
All files must be in the same top-level directory and must include a SKILL.md
file at the root of that directory.
"""
betas: Annotated[List[AnthropicBetaParam], PropertyInfo(alias="anthropic-beta")]
"""Optional header to specify the beta version(s) you want to use."""
| VersionCreateParams |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_critic.py | {
"start": 2753,
"end": 4632
} | class ____(util.MdCase):
"""Test accept mode."""
extension = [
'pymdownx.critic'
]
extension_configs = {
"pymdownx.critic": {
"mode": 'accept'
}
}
def test_accept(self):
"""Test accept mode."""
self.check_markdown(
R"""
Here is some {--*incorrect*--} Markdown. I am adding this{++ here.++}. Here is some more {--text
that I am removing--}text. And here is even more {++text that I
am ++}adding.{~~
~> ~~}Paragraph was deleted and replaced with some spaces.{~~ ~>
~~}Spaces were removed and a paragraph was added.
And here is a comment on {==some
==text== ==}{>>This works quite well. I just wanted to comment on it.<<}. Substitutions {~~is~>are~~} great!
General block handling.
{--
* test
* test
* test
* test
* test
--}
{++
* test
* test
* test
* test
* test
++}
""", # noqa: E501
"""
<p>Here is some Markdown. I am adding this here.. Here is some more text. And here is even more text that I
am adding. Paragraph was deleted and replaced with some spaces.</p>
<p>Spaces were removed and a paragraph was added.</p>
<p>And here is a comment on some
==text== . Substitutions are great!</p>
<p>General block handling.</p>
<ul>
<li>test<ul>
<li>test</li>
</ul>
</li>
<li>test</li>
<li>test</li>
<li>test</li>
</ul>
""", # noqa: E501
True
)
| TestCriticAcceptMode |
python | django__django | tests/queries/tests.py | {
"start": 171899,
"end": 173080
} | class ____(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name="jackstaff")
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name="jillstaff")
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
"creator__staffuser__staff", "owner__staffuser__staff"
)
self.assertEqual(str(qs.query).count(" JOIN "), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(
task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff,
)
self.assertEqual(
task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff,
)
| Ticket20955Tests |
python | tensorflow__tensorflow | tensorflow/python/framework/c_api_util.py | {
"start": 4310,
"end": 4546
} | class ____(UniquePtr):
"""Wrapper around TF_Function that handles deletion."""
def __init__(self, func, name):
super(ScopedTFFunction, self).__init__(
name=name, obj=func, deleter=c_api.TF_DeleteFunction)
| ScopedTFFunction |
python | benfred__py-spy | tests/integration_test.py | {
"start": 311,
"end": 4715
} | class ____(unittest.TestCase):
"""Basic tests of using py-spy as a commandline application"""
def _sample_process(self, script_name, options=None, include_profile_name=False):
if not PYSPY:
raise ValueError("Failed to find py-spy on the path")
# for permissions reasons, we really want to run the sampled python process as a
# subprocess of the py-spy (works best on linux etc). So we're running the
# record option, and setting different flags. To get the profile output
# we're using the speedscope format (since we can read that in as json)
with tempfile.NamedTemporaryFile() as profile_file:
filename = profile_file.name
if sys.platform.startswith("win"):
filename = "profile.json"
cmdline = [
PYSPY,
"record",
"-o",
filename,
"--format",
"speedscope",
"-d",
"2",
]
cmdline.extend(options or [])
cmdline.extend(["--", sys.executable, script_name])
env = dict(os.environ, RUST_LOG="info")
subprocess.check_output(cmdline, env=env)
with open(filename) as f:
profiles = json.load(f)
frames = profiles["shared"]["frames"]
samples = defaultdict(int)
for p in profiles["profiles"]:
for sample in p["samples"]:
if include_profile_name:
samples[
tuple(
[p["name"]] + [Frame(**frames[frame]) for frame in sample]
)
] += 1
else:
samples[tuple(Frame(**frames[frame]) for frame in sample)] += 1
return samples
def test_longsleep(self):
# running with the gil flag should have ~ no samples returned
if GIL:
profile = self._sample_process(_get_script("longsleep.py"), GIL)
print(profile)
assert sum(profile.values()) <= 10
# running with the idle flag should have > 95% of samples in the sleep call
profile = self._sample_process(_get_script("longsleep.py"), ["--idle"])
sample, count = _most_frequent_sample(profile)
assert count >= 95
assert len(sample) == 2
assert sample[0].name == "<module>"
assert sample[0].line == 9
assert sample[1].name == "longsleep"
assert sample[1].line == 5
def test_busyloop(self):
# can't be sure what line we're on, but we should have ~ all samples holding the gil
profile = self._sample_process(_get_script("busyloop.py"), GIL)
assert sum(profile.values()) >= 95
def test_thread_names(self):
# we don't support getting thread names on python < 3.6
v = sys.version_info
if v.major < 3 or v.minor < 6:
return
for _ in range(3):
profile = self._sample_process(
_get_script("thread_names.py"),
["--threads", "--idle"],
include_profile_name=True,
)
expected_thread_names = set("CustomThreadName-" + str(i) for i in range(10))
expected_thread_names.add("MainThread")
name_re = re.compile(r"\"(.*)\"")
actual_thread_names = {name_re.search(p[0]).groups()[0] for p in profile}
if expected_thread_names == actual_thread_names:
break
if expected_thread_names != actual_thread_names:
print(
"failed to get thread names",
expected_thread_names,
actual_thread_names,
)
assert expected_thread_names == actual_thread_names
def test_shell_completions(self):
cmdline = [PYSPY, "completions", "bash"]
subprocess.check_output(cmdline)
def _get_script(name):
base_dir = os.path.dirname(__file__)
return os.path.join(base_dir, "scripts", name)
def _most_frequent_sample(samples):
frames, count = max(samples.items(), key=lambda x: x[1])
# lets normalize as a percentage here, rather than raw number of samples
return frames, int(100 * count / sum(samples.values()))
if __name__ == "__main__":
print("Testing py-spy @", PYSPY)
unittest.main()
| TestPyspy |
python | django__django | django/forms/widgets.py | {
"start": 18776,
"end": 19145
} | class ____(TextInput):
format_key = ""
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format or None
def format_value(self, value):
return formats.localize_input(
value, self.format or formats.get_format(self.format_key)[0]
)
| DateTimeBaseInput |
python | django__django | tests/migrations/test_base.py | {
"start": 15694,
"end": 17073
} | class ____(SimpleTestCase):
"""Common functions to help test the optimizer."""
def optimize(self, operations, app_label):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations, app_label), optimizer._iterations
def serialize(self, value):
return serializer_factory(value).serialize()[0]
def assertOptimizesTo(
self, operations, expected, exact=None, less_than=None, app_label=None
):
result, iterations = self.optimize(operations, app_label or "migrations")
result = [self.serialize(f) for f in result]
expected = [self.serialize(f) for f in expected]
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException(
"Optimization did not take exactly %s iterations (it took %s)"
% (exact, iterations)
)
if less_than is not None and iterations >= less_than:
raise self.failureException(
"Optimization did not take less than %s iterations (it took %s)"
% (less_than, iterations)
)
def assertDoesNotOptimize(self, operations, **kwargs):
self.assertOptimizesTo(operations, operations, **kwargs)
| OptimizerTestBase |
python | PrefectHQ__prefect | src/prefect/client/schemas/responses.py | {
"start": 651,
"end": 860
} | class ____(AutoEnum):
"""Enumerates return statuses for setting run states."""
ACCEPT = AutoEnum.auto()
REJECT = AutoEnum.auto()
ABORT = AutoEnum.auto()
WAIT = AutoEnum.auto()
| SetStateStatus |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/evaluator.py | {
"start": 881,
"end": 943
} | class ____(exc.InvalidRequestError):
pass
| UnevaluatableError |
python | numpy__numpy | numpy/lib/tests/test_arraypad.py | {
"start": 729,
"end": 3513
} | class ____:
def test_single_value(self):
"""Test casting for a single value."""
expected = np.array([[3, 3]] * 10)
for x in (3, [3], [[3]]):
result = _as_pairs(x, 10)
assert_equal(result, expected)
# Test with dtype=object
obj = object()
assert_equal(
_as_pairs(obj, 10),
np.array([[obj, obj]] * 10)
)
def test_two_values(self):
"""Test proper casting for two different values."""
# Broadcasting in the first dimension with numbers
expected = np.array([[3, 4]] * 10)
for x in ([3, 4], [[3, 4]]):
result = _as_pairs(x, 10)
assert_equal(result, expected)
# and with dtype=object
obj = object()
assert_equal(
_as_pairs(["a", obj], 10),
np.array([["a", obj]] * 10)
)
# Broadcasting in the second / last dimension with numbers
assert_equal(
_as_pairs([[3], [4]], 2),
np.array([[3, 3], [4, 4]])
)
# and with dtype=object
assert_equal(
_as_pairs([["a"], [obj]], 2),
np.array([["a", "a"], [obj, obj]])
)
def test_with_none(self):
expected = ((None, None), (None, None), (None, None))
assert_equal(
_as_pairs(None, 3, as_index=False),
expected
)
assert_equal(
_as_pairs(None, 3, as_index=True),
expected
)
def test_pass_through(self):
"""Test if `x` already matching desired output are passed through."""
expected = np.arange(12).reshape((6, 2))
assert_equal(
_as_pairs(expected, 6),
expected
)
def test_as_index(self):
"""Test results if `as_index=True`."""
assert_equal(
_as_pairs([2.6, 3.3], 10, as_index=True),
np.array([[3, 3]] * 10, dtype=np.intp)
)
assert_equal(
_as_pairs([2.6, 4.49], 10, as_index=True),
np.array([[3, 4]] * 10, dtype=np.intp)
)
for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
[[1, 2]] * 9 + [[1, -2]]):
with pytest.raises(ValueError, match="negative values"):
_as_pairs(x, 10, as_index=True)
def test_exceptions(self):
"""Ensure faulty usage is discovered."""
with pytest.raises(ValueError, match="more dimensions than allowed"):
_as_pairs([[[3]]], 10)
with pytest.raises(ValueError, match="could not be broadcast"):
_as_pairs([[1, 2], [3, 4]], 3)
with pytest.raises(ValueError, match="could not be broadcast"):
_as_pairs(np.ones((2, 3)), 3)
| TestAsPairs |
python | getsentry__sentry | src/sentry/api/serializers/models/broadcast.py | {
"start": 176,
"end": 1069
} | class ____(Serializer):
def get_attrs(self, item_list, user, **kwargs):
if not user.is_authenticated:
seen = set()
else:
seen = set(
BroadcastSeen.objects.filter(broadcast__in=item_list, user_id=user.id).values_list(
"broadcast", flat=True
)
)
return {item: {"seen": item.id in seen} for item in item_list}
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": str(obj.id),
"message": obj.message,
"title": obj.title,
"link": obj.link,
"mediaUrl": obj.media_url,
"isActive": obj.is_active,
"dateCreated": obj.date_added,
"dateExpires": obj.date_expires,
"hasSeen": attrs["seen"],
"category": obj.category,
}
| BroadcastSerializer |
python | redis__redis-py | tests/test_maint_notifications.py | {
"start": 25503,
"end": 31234
} | class ____:
"""Test the MaintNotificationsConnectionHandler class."""
def setup_method(self):
"""Set up test fixtures."""
self.mock_connection = Mock()
self.config = MaintNotificationsConfig(enabled=True, relaxed_timeout=20)
self.handler = MaintNotificationsConnectionHandler(
self.mock_connection, self.config
)
def test_init(self):
"""Test MaintNotificationsConnectionHandler initialization."""
assert self.handler.connection == self.mock_connection
assert self.handler.config == self.config
def test_handle_notification_migrating(self):
"""Test handling of NodeMigratingNotification."""
notification = NodeMigratingNotification(id=1, ttl=5)
with patch.object(
self.handler, "handle_maintenance_start_notification"
) as mock_handle:
self.handler.handle_notification(notification)
mock_handle.assert_called_once_with(MaintenanceState.MAINTENANCE)
def test_handle_notification_migrated(self):
"""Test handling of NodeMigratedNotification."""
notification = NodeMigratedNotification(id=1)
with patch.object(
self.handler, "handle_maintenance_completed_notification"
) as mock_handle:
self.handler.handle_notification(notification)
mock_handle.assert_called_once_with()
def test_handle_notification_failing_over(self):
"""Test handling of NodeFailingOverNotification."""
notification = NodeFailingOverNotification(id=1, ttl=5)
with patch.object(
self.handler, "handle_maintenance_start_notification"
) as mock_handle:
self.handler.handle_notification(notification)
mock_handle.assert_called_once_with(MaintenanceState.MAINTENANCE)
def test_handle_notification_failed_over(self):
"""Test handling of NodeFailedOverNotification."""
notification = NodeFailedOverNotification(id=1)
with patch.object(
self.handler, "handle_maintenance_completed_notification"
) as mock_handle:
self.handler.handle_notification(notification)
mock_handle.assert_called_once_with()
def test_handle_notification_unknown_type(self):
"""Test handling of unknown notification type."""
notification = NodeMovingNotification(
id=1, new_node_host="localhost", new_node_port=6379, ttl=10
)
result = self.handler.handle_notification(notification)
assert result is None
def test_handle_maintenance_start_notification_disabled(self):
"""Test maintenance start notification handling when relaxed timeouts are disabled."""
config = MaintNotificationsConfig(relaxed_timeout=-1)
handler = MaintNotificationsConnectionHandler(self.mock_connection, config)
result = handler.handle_maintenance_start_notification(
MaintenanceState.MAINTENANCE
)
assert result is None
self.mock_connection.update_current_socket_timeout.assert_not_called()
def test_handle_maintenance_start_notification_moving_state(self):
"""Test maintenance start notification handling when connection is in MOVING state."""
self.mock_connection.maintenance_state = MaintenanceState.MOVING
result = self.handler.handle_maintenance_start_notification(
MaintenanceState.MAINTENANCE
)
assert result is None
self.mock_connection.update_current_socket_timeout.assert_not_called()
def test_handle_maintenance_start_notification_success(self):
"""Test successful maintenance start notification handling for migrating."""
self.mock_connection.maintenance_state = MaintenanceState.NONE
self.handler.handle_maintenance_start_notification(MaintenanceState.MAINTENANCE)
assert self.mock_connection.maintenance_state == MaintenanceState.MAINTENANCE
self.mock_connection.update_current_socket_timeout.assert_called_once_with(20)
self.mock_connection.set_tmp_settings.assert_called_once_with(
tmp_relaxed_timeout=20
)
def test_handle_maintenance_completed_notification_disabled(self):
"""Test maintenance completed notification handling when relaxed timeouts are disabled."""
config = MaintNotificationsConfig(relaxed_timeout=-1)
handler = MaintNotificationsConnectionHandler(self.mock_connection, config)
result = handler.handle_maintenance_completed_notification()
assert result is None
self.mock_connection.update_current_socket_timeout.assert_not_called()
def test_handle_maintenance_completed_notification_moving_state(self):
"""Test maintenance completed notification handling when connection is in MOVING state."""
self.mock_connection.maintenance_state = MaintenanceState.MOVING
result = self.handler.handle_maintenance_completed_notification()
assert result is None
self.mock_connection.update_current_socket_timeout.assert_not_called()
def test_handle_maintenance_completed_notification_success(self):
"""Test successful maintenance completed notification handling."""
self.mock_connection.maintenance_state = MaintenanceState.MAINTENANCE
self.handler.handle_maintenance_completed_notification()
assert self.mock_connection.maintenance_state == MaintenanceState.NONE
self.mock_connection.update_current_socket_timeout.assert_called_once_with(-1)
self.mock_connection.reset_tmp_settings.assert_called_once_with(
reset_relaxed_timeout=True
)
| TestMaintNotificationsConnectionHandler |
python | django__django | tests/gis_tests/layermap/models.py | {
"start": 816,
"end": 1056
} | class ____(NamedModel):
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
class Meta:
app_label = "layermap"
# Same as `City` above, but for testing model inheritance.
| Interstate |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/kubernetes.py | {
"start": 1325,
"end": 1443
} | class ____(BaseModel, extra="forbid"):
type: str
port: int
annotations: Optional[Annotations] = None
| Service |
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 2090,
"end": 3504
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
"""
loss: Optional[tuple[torch.FloatTensor]] = None
logits: Optional[tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
qformer_outputs: Optional[tuple[torch.FloatTensor]] = None
language_model_outputs: Optional[tuple[torch.FloatTensor]] = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k]
if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
else getattr(self, k).to_tuple()
for k in self.keys()
)
# Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->InstructBlip
| InstructBlipForConditionalGenerationModelOutput |
python | Textualize__textual | src/textual/validation.py | {
"start": 392,
"end": 2360
} | class ____:
"""The result of calling a `Validator.validate` method."""
failures: Sequence[Failure] = field(default_factory=list)
"""A list of reasons why the value was invalid. Empty if valid=True"""
@staticmethod
def merge(results: Sequence["ValidationResult"]) -> "ValidationResult":
"""Merge multiple ValidationResult objects into one.
Args:
results: List of ValidationResult objects to merge.
Returns:
Merged ValidationResult object.
"""
is_valid = all(result.is_valid for result in results)
failures = [failure for result in results for failure in result.failures]
if is_valid:
return ValidationResult.success()
else:
return ValidationResult.failure(failures)
@staticmethod
def success() -> ValidationResult:
"""Construct a successful ValidationResult.
Returns:
A successful ValidationResult.
"""
return ValidationResult()
@staticmethod
def failure(failures: Sequence[Failure]) -> ValidationResult:
"""Construct a failure ValidationResult.
Args:
failures: The failures.
Returns:
A failure ValidationResult.
"""
return ValidationResult(failures)
@property
def failure_descriptions(self) -> list[str]:
"""Utility for extracting failure descriptions as strings.
Useful if you don't care about the additional metadata included in the `Failure` objects.
Returns:
A list of the string descriptions explaining the failing validations.
"""
return [
failure.description
for failure in self.failures
if failure.description is not None
]
@property
def is_valid(self) -> bool:
"""True if the validation was successful."""
return len(self.failures) == 0
@dataclass
| ValidationResult |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 113300,
"end": 117580
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "剛才"
assert self.locale._format_timeframe("second", 1) == "1秒"
assert self.locale._format_timeframe("seconds", 30) == "30秒"
assert self.locale._format_timeframe("minute", 1) == "1分鐘"
assert self.locale._format_timeframe("minutes", 40) == "40分鐘"
assert self.locale._format_timeframe("hour", 1) == "1小時"
assert self.locale._format_timeframe("hours", 23) == "23小時"
assert self.locale._format_timeframe("day", 1) == "1天"
assert self.locale._format_timeframe("days", 12) == "12天"
assert self.locale._format_timeframe("week", 1) == "1星期"
assert self.locale._format_timeframe("weeks", 38) == "38星期"
assert self.locale._format_timeframe("month", 1) == "1個月"
assert self.locale._format_timeframe("months", 11) == "11個月"
assert self.locale._format_timeframe("year", 1) == "1年"
assert self.locale._format_timeframe("years", 12) == "12年"
assert self.locale._format_timeframe("second", -1) == "1秒"
assert self.locale._format_timeframe("seconds", -30) == "30秒"
assert self.locale._format_timeframe("minute", -1) == "1分鐘"
assert self.locale._format_timeframe("minutes", -40) == "40分鐘"
assert self.locale._format_timeframe("hour", -1) == "1小時"
assert self.locale._format_timeframe("hours", -23) == "23小時"
assert self.locale._format_timeframe("day", -1) == "1天"
assert self.locale._format_timeframe("days", -12) == "12天"
assert self.locale._format_timeframe("week", -1) == "1星期"
assert self.locale._format_timeframe("weeks", -38) == "38星期"
assert self.locale._format_timeframe("month", -1) == "1個月"
assert self.locale._format_timeframe("months", -11) == "11個月"
assert self.locale._format_timeframe("year", -1) == "1年"
assert self.locale._format_timeframe("years", -12) == "12年"
def test_format_relative_now(self):
assert self.locale._format_relative("剛才", "now", 0) == "剛才"
def test_format_relative_past(self):
assert self.locale._format_relative("1秒", "second", 1) == "1秒後"
assert self.locale._format_relative("2秒", "seconds", 2) == "2秒後"
assert self.locale._format_relative("1分鐘", "minute", 1) == "1分鐘後"
assert self.locale._format_relative("2分鐘", "minutes", 2) == "2分鐘後"
assert self.locale._format_relative("1小時", "hour", 1) == "1小時後"
assert self.locale._format_relative("2小時", "hours", 2) == "2小時後"
assert self.locale._format_relative("1天", "day", 1) == "1天後"
assert self.locale._format_relative("2天", "days", 2) == "2天後"
assert self.locale._format_relative("1星期", "week", 1) == "1星期後"
assert self.locale._format_relative("2星期", "weeks", 2) == "2星期後"
assert self.locale._format_relative("1個月", "month", 1) == "1個月後"
assert self.locale._format_relative("2個月", "months", 2) == "2個月後"
assert self.locale._format_relative("1年", "year", 1) == "1年後"
assert self.locale._format_relative("2年", "years", 2) == "2年後"
def test_format_relative_future(self):
assert self.locale._format_relative("1秒", "second", -1) == "1秒前"
assert self.locale._format_relative("2秒", "seconds", -2) == "2秒前"
assert self.locale._format_relative("1分鐘", "minute", -1) == "1分鐘前"
assert self.locale._format_relative("2分鐘", "minutes", -2) == "2分鐘前"
assert self.locale._format_relative("1小時", "hour", -1) == "1小時前"
assert self.locale._format_relative("2小時", "hours", -2) == "2小時前"
assert self.locale._format_relative("1天", "day", -1) == "1天前"
assert self.locale._format_relative("2天", "days", -2) == "2天前"
assert self.locale._format_relative("1星期", "week", -1) == "1星期前"
assert self.locale._format_relative("2星期", "weeks", -2) == "2星期前"
assert self.locale._format_relative("1個月", "month", -1) == "1個月前"
assert self.locale._format_relative("2個月", "months", -2) == "2個月前"
assert self.locale._format_relative("1年", "year", -1) == "1年前"
assert self.locale._format_relative("2年", "years", -2) == "2年前"
@pytest.mark.usefixtures("lang_locale")
| TestHongKongLocale |
python | sqlalchemy__sqlalchemy | test/ext/test_hybrid.py | {
"start": 1798,
"end": 8011
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _fixture(self, use_inplace=False, use_classmethod=False):
Base = declarative_base()
class UCComparator(hybrid.Comparator):
def __eq__(self, other):
if other is None:
return self.expression is None
else:
return func.upper(self.expression) == func.upper(other)
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
_value = Column("value", String)
@hybrid.hybrid_property
def value(self):
"This is a docstring"
return self._value - 5
if use_classmethod:
if use_inplace:
@value.inplace.comparator
@classmethod
def _value_comparator(cls):
return UCComparator(cls._value)
else:
@value.comparator
@classmethod
def value(cls):
return UCComparator(cls._value)
else:
if use_inplace:
@value.inplace.comparator
def _value_comparator(cls):
return UCComparator(cls._value)
else:
@value.comparator
def value(cls):
return UCComparator(cls._value)
@value.setter
def value(self, v):
self._value = v + 5
return A
def test_set_get(self):
A = self._fixture()
a1 = A(value=5)
eq_(a1._value, 10)
eq_(a1.value, 5)
@testing.variation("use_inplace", [True, False])
@testing.variation("use_classmethod", [True, False])
def test_value(self, use_inplace, use_classmethod):
A = self._fixture(
use_inplace=use_inplace, use_classmethod=use_classmethod
)
eq_(str(A.value == 5), "upper(a.value) = upper(:upper_1)")
@testing.variation("use_inplace", [True, False])
@testing.variation("use_classmethod", [True, False])
def test_aliased_value(self, use_inplace, use_classmethod):
A = self._fixture(
use_inplace=use_inplace, use_classmethod=use_classmethod
)
eq_(str(aliased(A).value == 5), "upper(a_1.value) = upper(:upper_1)")
@testing.variation("use_inplace", [True, False])
@testing.variation("use_classmethod", [True, False])
def test_query(self, use_inplace, use_classmethod):
A = self._fixture(
use_inplace=use_inplace, use_classmethod=use_classmethod
)
sess = fixture_session()
self.assert_compile(
sess.query(A.value), "SELECT a.value AS a_value FROM a"
)
@testing.variation("use_inplace", [True, False])
@testing.variation("use_classmethod", [True, False])
def test_aliased_query(self, use_inplace, use_classmethod):
A = self._fixture(
use_inplace=use_inplace, use_classmethod=use_classmethod
)
sess = fixture_session()
self.assert_compile(
sess.query(aliased(A).value),
"SELECT a_1.value AS a_1_value FROM a AS a_1",
)
@testing.variation("use_inplace", [True, False])
@testing.variation("use_classmethod", [True, False])
def test_aliased_filter(self, use_inplace, use_classmethod):
A = self._fixture(
use_inplace=use_inplace, use_classmethod=use_classmethod
)
sess = fixture_session()
self.assert_compile(
sess.query(aliased(A)).filter_by(value="foo"),
"SELECT a_1.id AS a_1_id, a_1.value AS a_1_value "
"FROM a AS a_1 WHERE upper(a_1.value) = upper(:upper_1)",
)
def test_docstring(self):
A = self._fixture()
eq_(A.value.__doc__, "This is a docstring")
def test_no_name_one(self):
"""test :ticket:`6215`"""
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
name = Column(String(50))
@hybrid.hybrid_property
def same_name(self):
return self.id
def name1(self):
return self.id
different_name = hybrid.hybrid_property(name1)
no_name = hybrid.hybrid_property(lambda self: self.name)
stmt = select(A.same_name, A.different_name, A.no_name)
compiled = stmt.compile()
eq_(
[ent._label_name for ent in compiled.compile_state._entities],
["same_name", "id", "name"],
)
def test_no_name_two(self):
"""test :ticket:`6215`"""
Base = declarative_base()
class SomeMixin:
@hybrid.hybrid_property
def same_name(self):
return self.id
def name1(self):
return self.id
different_name = hybrid.hybrid_property(name1)
no_name = hybrid.hybrid_property(lambda self: self.name)
class A(SomeMixin, Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
name = Column(String(50))
stmt = select(A.same_name, A.different_name, A.no_name)
compiled = stmt.compile()
eq_(
[ent._label_name for ent in compiled.compile_state._entities],
["same_name", "id", "name"],
)
def test_custom_op(self, registry):
"""test #3162"""
my_op = operators.custom_op(
"my_op", python_impl=lambda a, b: a + "_foo_" + b
)
@registry.mapped
class SomeClass:
__tablename__ = "sc"
id = Column(Integer, primary_key=True)
data = Column(String)
@hybrid.hybrid_property
def foo_data(self):
return my_op(self.data, "bar")
eq_(SomeClass(data="data").foo_data, "data_foo_bar")
self.assert_compile(SomeClass.foo_data, "sc.data my_op :data_1")
| PropertyComparatorTest |
python | aio-libs__aiohttp | aiohttp/http_exceptions.py | {
"start": 2564,
"end": 2807
} | class ____(BadStatusLine):
"""Invalid HTTP method in status line."""
def __init__(self, line: str = "", error: str | None = None) -> None:
super().__init__(line, error or f"Bad HTTP method in status line {line!r}")
| BadHttpMethod |
python | scipy__scipy | scipy/io/matlab/_mio5.py | {
"start": 18547,
"end": 30468
} | class ____:
''' Generic matlab matrix writing class '''
mat_tag = np.zeros((), NDT_TAG_FULL)
mat_tag['mdtype'] = miMATRIX
def __init__(self, file_writer):
self.file_stream = file_writer.file_stream
self.unicode_strings = file_writer.unicode_strings
self.long_field_names = file_writer.long_field_names
self.oned_as = file_writer.oned_as
# These are used for top level writes, and unset after
self._var_name = None
self._var_is_global = False
def write_bytes(self, arr):
self.file_stream.write(arr.tobytes(order='F'))
def write_string(self, s):
self.file_stream.write(s)
def write_element(self, arr, mdtype=None):
''' write tag and data '''
if mdtype is None:
mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
# Array needs to be in native byte order
if arr.dtype.byteorder == swapped_code:
arr = arr.byteswap().view(arr.dtype.newbyteorder())
byte_count = arr.size*arr.itemsize
if byte_count <= 4:
self.write_smalldata_element(arr, mdtype, byte_count)
else:
self.write_regular_element(arr, mdtype, byte_count)
def write_smalldata_element(self, arr, mdtype, byte_count):
# write tag with embedded data
tag = np.zeros((), NDT_TAG_SMALL)
tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
# if arr.tobytes is < 4, the element will be zero-padded as needed.
tag['data'] = arr.tobytes(order='F')
self.write_bytes(tag)
def write_regular_element(self, arr, mdtype, byte_count):
# write tag, data
tag = np.zeros((), NDT_TAG_FULL)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
self.write_bytes(tag)
self.write_bytes(arr)
# pad to next 64-bit boundary
bc_mod_8 = byte_count % 8
if bc_mod_8:
self.file_stream.write(b'\x00' * (8-bc_mod_8))
def write_header(self,
shape,
mclass,
is_complex=False,
is_logical=False,
nzmax=0):
''' Write header for given data options
shape : sequence
array shape
mclass - mat5 matrix class
is_complex - True if matrix is complex
is_logical - True if matrix is logical
nzmax - max non zero elements for sparse arrays
We get the name and the global flag from the object, and reset
them to defaults after we've used them
'''
# get name and is_global from one-shot object store
name = self._var_name
is_global = self._var_is_global
# initialize the top-level matrix tag, store position
self._mat_tag_pos = self.file_stream.tell()
self.write_bytes(self.mat_tag)
# write array flags (complex, global, logical, class, nzmax)
af = np.zeros((), NDT_ARRAY_FLAGS)
af['data_type'] = miUINT32
af['byte_count'] = 8
flags = is_complex << 3 | is_global << 2 | is_logical << 1
af['flags_class'] = mclass | flags << 8
af['nzmax'] = nzmax
self.write_bytes(af)
# shape
self.write_element(np.array(shape, dtype='i4'))
# write name
name = np.asarray(name)
if name == '': # empty string zero-terminated
self.write_smalldata_element(name, miINT8, 0)
else:
self.write_element(name, miINT8)
# reset the one-shot store to defaults
self._var_name = ''
self._var_is_global = False
def update_matrix_tag(self, start_pos):
curr_pos = self.file_stream.tell()
self.file_stream.seek(start_pos)
byte_count = curr_pos - start_pos - 8
if byte_count >= 2**32:
raise MatWriteError("Matrix too large to save with Matlab "
"5 format")
self.mat_tag['byte_count'] = byte_count
self.write_bytes(self.mat_tag)
self.file_stream.seek(curr_pos)
def write_top(self, arr, name, is_global):
""" Write variable at top level of mat file
Parameters
----------
arr : array_like
array-like object to create writer for
name : str, optional
name as it will appear in matlab workspace
default is empty string
is_global : {False, True}, optional
whether variable will be global on load into matlab
"""
# these are set before the top-level header write, and unset at
# the end of the same write, because they do not apply for lower levels
self._var_is_global = is_global
self._var_name = name
# write the header and data
self.write(arr)
def write(self, arr):
''' Write `arr` to stream at top and sub levels
Parameters
----------
arr : array_like
array-like object to create writer for
'''
# store position, so we can update the matrix tag
mat_tag_pos = self.file_stream.tell()
# First check if these are sparse
if scipy.sparse.issparse(arr):
self.write_sparse(arr)
self.update_matrix_tag(mat_tag_pos)
return
# Try to convert things that aren't arrays
narr = to_writeable(arr)
if narr is None:
raise TypeError(f'Could not convert {arr} (type {type(arr)}) to array')
if isinstance(narr, MatlabObject):
self.write_object(narr)
elif isinstance(narr, MatlabFunction):
raise MatWriteError('Cannot write matlab functions')
elif narr is EmptyStructMarker: # empty struct array
self.write_empty_struct()
elif narr.dtype.fields: # struct array
self.write_struct(narr)
elif narr.dtype.hasobject: # cell array
self.write_cells(narr)
elif narr.dtype.kind in ('U', 'S'):
if self.unicode_strings:
codec = 'UTF8'
else:
codec = 'ascii'
self.write_char(narr, codec)
else:
self.write_numeric(narr)
self.update_matrix_tag(mat_tag_pos)
def write_numeric(self, arr):
imagf = arr.dtype.kind == 'c'
logif = arr.dtype.kind == 'b'
try:
mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
except KeyError:
# No matching matlab type, probably complex256 / float128 / float96
# Cast data to complex128 / float64.
if imagf:
arr = arr.astype('c128')
elif logif:
arr = arr.astype('i1') # Should only contain 0/1
else:
arr = arr.astype('f8')
mclass = mxDOUBLE_CLASS
self.write_header(matdims(arr, self.oned_as),
mclass,
is_complex=imagf,
is_logical=logif)
if imagf:
self.write_element(arr.real)
self.write_element(arr.imag)
else:
self.write_element(arr)
def write_char(self, arr, codec='ascii'):
''' Write string array `arr` with given `codec`
'''
if arr.size == 0 or np.all(arr == ''):
# This an empty string array or a string array containing
# only empty strings. Matlab cannot distinguish between a
# string array that is empty, and a string array containing
# only empty strings, because it stores strings as arrays of
# char. There is no way of having an array of char that is
# not empty, but contains an empty string. We have to
# special-case the array-with-empty-strings because even
# empty strings have zero padding, which would otherwise
# appear in matlab as a string with a space.
shape = (0,) * np.max([arr.ndim, 2])
self.write_header(shape, mxCHAR_CLASS)
self.write_smalldata_element(arr, miUTF8, 0)
return
# non-empty string.
#
# Convert to char array
arr = arr_to_chars(arr)
# We have to write the shape directly, because we are going
# recode the characters, and the resulting stream of chars
# may have a different length
shape = arr.shape
self.write_header(shape, mxCHAR_CLASS)
if arr.dtype.kind == 'U' and arr.size:
# Make one long string from all the characters. We need to
# transpose here, because we're flattening the array, before
# we write the bytes. The bytes have to be written in
# Fortran order.
n_chars = math.prod(shape)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr.T.copy()) # Fortran order
# Recode with codec to give byte string
st = st_arr.item().encode(codec)
# Reconstruct as 1-D byte array
arr = np.ndarray(shape=(len(st),),
dtype='S1',
buffer=st)
self.write_element(arr, mdtype=miUTF8)
def write_sparse(self, arr):
''' Sparse matrices are 2D
'''
A = arr.tocsc() # convert to sparse CSC format
A.sort_indices() # MATLAB expects sorted row indices
is_complex = (A.dtype.kind == 'c')
is_logical = (A.dtype.kind == 'b')
nz = A.nnz
self.write_header(matdims(arr, self.oned_as),
mxSPARSE_CLASS,
is_complex=is_complex,
is_logical=is_logical,
# matlab won't load file with 0 nzmax
nzmax=1 if nz == 0 else nz)
self.write_element(A.indices.astype('i4'))
self.write_element(A.indptr.astype('i4'))
self.write_element(A.data.real)
if is_complex:
self.write_element(A.data.imag)
def write_cells(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxCELL_CLASS)
# loop over data, column major
A = np.atleast_2d(arr).flatten('F')
for el in A:
self.write(el)
def write_empty_struct(self):
self.write_header((1, 1), mxSTRUCT_CLASS)
# max field name length set to 1 in an example matlab struct
self.write_element(np.array(1, dtype=np.int32))
# Field names element is empty
self.write_element(np.array([], dtype=np.int8))
def write_struct(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxSTRUCT_CLASS)
self._write_items(arr)
def _write_items(self, arr):
# write fieldnames
fieldnames = [f[0] for f in arr.dtype.descr]
length = max([len(fieldname) for fieldname in fieldnames])+1
max_length = (self.long_field_names and 64) or 32
if length > max_length:
raise ValueError(
f"Field names are restricted to {max_length - 1} characters"
)
self.write_element(np.array([length], dtype='i4'))
self.write_element(np.array(fieldnames, dtype=f'S{length}'), mdtype=miINT8)
A = np.atleast_2d(arr).flatten('F')
for el in A:
for f in fieldnames:
self.write(el[f])
def write_object(self, arr):
'''Same as writing structs, except different mx class, and extra
classname element after header
'''
self.write_header(matdims(arr, self.oned_as),
mxOBJECT_CLASS)
self.write_element(np.array(arr.classname, dtype='S'),
mdtype=miINT8)
self._write_items(arr)
| VarWriter5 |
python | walkccc__LeetCode | solutions/1079. Letter Tile Possibilities/1079.py | {
"start": 0,
"end": 554
} | class ____:
def numTilePossibilities(self, tiles: str) -> int:
count = collections.Counter(tiles)
def dfs(count: dict[int, int]) -> int:
possibleSequences = 0
for k, v in count.items():
if v == 0:
continue
# Put c in the current position. We only care about the number of possible
# sequences of letters but don't care about the actual combination.
count[k] -= 1
possibleSequences += 1 + dfs(count)
count[k] += 1
return possibleSequences
return dfs(count)
| Solution |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 2337,
"end": 2564
} | class ____(Model):
''' A base class for all ticker types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| Ticker |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 29177,
"end": 29455
} | class ____(BaseModel):
alias_name: str
after: AwareDatetime | None = None
before: AwareDatetime | None = None
limit: int | None = None
ascending: bool = True
type: Literal["GetAssetEventByAssetAlias"] = "GetAssetEventByAssetAlias"
| GetAssetEventByAssetAlias |
python | getsentry__sentry | src/sentry/incidents/handlers/condition/anomaly_detection_handler.py | {
"start": 1343,
"end": 3560
} | class ____(DataConditionHandler[AnomalyDetectionUpdate]):
group = DataConditionHandler.Group.DETECTOR_TRIGGER
comparison_json_schema = {
"type": "object",
"properties": {
"sensitivity": {
"type": "string",
"enum": [*AnomalyDetectionSensitivity],
},
"seasonality": {
"type": "string",
"enum": [*AnomalyDetectionSeasonality],
},
"threshold_type": {
"type": "integer",
"enum": [*AnomalyDetectionThresholdType],
},
},
"required": ["sensitivity", "seasonality", "threshold_type"],
"additionalProperties": False,
}
@staticmethod
def evaluate_value(update: AnomalyDetectionUpdate, comparison: Any) -> DetectorPriorityLevel:
from sentry.seer.anomaly_detection.get_anomaly_data import get_anomaly_data_from_seer
sensitivity = comparison["sensitivity"]
seasonality = comparison["seasonality"]
threshold_type = comparison["threshold_type"]
source_id = update.get("source_id")
assert source_id
subscription: QuerySubscription = QuerySubscription.objects.get(id=int(source_id))
anomaly_data = get_anomaly_data_from_seer(
sensitivity=sensitivity,
seasonality=seasonality,
threshold_type=threshold_type,
subscription=subscription,
subscription_update=update,
)
# covers both None and []
if not anomaly_data:
# something went wrong during evaluation
raise DataConditionEvaluationException("Error during Seer data evaluation process.")
anomaly_type = anomaly_data[0].get("anomaly", {}).get("anomaly_type")
if anomaly_type == AnomalyType.NO_DATA.value:
raise DataConditionEvaluationException(
"Project doesn't have enough data for detector to evaluate"
)
elif anomaly_type is None:
raise DataConditionEvaluationException("Seer response contained no evaluation data")
return SEER_EVALUATION_TO_DETECTOR_PRIORITY[anomaly_type]
| AnomalyDetectionHandler |
python | doocs__leetcode | solution/0000-0099/0017.Letter Combinations of a Phone Number/Solution2.py | {
"start": 0,
"end": 502
} | class ____:
def letterCombinations(self, digits: str) -> List[str]:
def dfs(i: int):
if i >= len(digits):
ans.append("".join(t))
return
for c in d[int(digits[i]) - 2]:
t.append(c)
dfs(i + 1)
t.pop()
if not digits:
return []
d = ["abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
ans = []
t = []
dfs(0)
return ans
| Solution |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 3564,
"end": 3690
} | class ____(torch.nn.Module):
def forward(self, x):
return MaterializingGradFunction.apply(x)
| MaterializingGradModule |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/Robot_arm/DPPO.py | {
"start": 1336,
"end": 4554
} | class ____(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # choosing action
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.sess.run(tf.global_variables_initializer())
def update(self):
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.sess.run(self.update_oldpi_op) # old pi to pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())]
data = np.vstack(data)
s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 200, tf.nn.relu, trainable=trainable)
mu = A_BOUND * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -2, 2)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
| PPO |
python | pypa__warehouse | tests/common/db/packaging.py | {
"start": 5379,
"end": 5637
} | class ____(WarehouseFactory):
class Meta:
model = AlternateRepository
name = factory.Faker("word")
url = factory.Faker("uri")
description = factory.Faker("text")
project = factory.SubFactory(ProjectFactory)
| AlternateRepositoryFactory |
python | sqlalchemy__sqlalchemy | examples/inheritance/concrete.py | {
"start": 1406,
"end": 1858
} | class ____(Person):
__tablename__ = "engineer"
id: Mapped[int] = mapped_column(primary_key=True)
company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
name: Mapped[str50]
status: Mapped[str50]
engineer_name: Mapped[str50]
primary_language: Mapped[str50]
company: Mapped[Company] = relationship(back_populates="employees")
__mapper_args__ = {"polymorphic_identity": "engineer", "concrete": True}
| Engineer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.