index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
48,521 | eik4862/TinyCalculator | refs/heads/master | /Warning/__init__.py | __all__ = ['Warning', 'ParserWarning']
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,522 | eik4862/TinyCalculator | refs/heads/master | /Core/TypeSystem.py | from __future__ import annotations
from typing import final, List, Optional
class T:
    """Base class of every type in the TinyCalculator type system.

    A type is either *base* (scalar: Real, Cmplx, Str, Bool, Sym, Void) or
    composite (Tens, Arr); the subtype relation is expressed through
    ``__le__`` on the subclasses.
    """

    def __init__(self, base: bool) -> None:
        # True iff this is a base (scalar) type; False for containers.
        self.__base: bool = base

    @classmethod
    def supt(cls, t1: T, t2: T) -> Optional[T]:
        """Return the supremum (larger) of two types.

        :return: The supertype of ``t1`` and ``t2``, or None when the two
            types are incomparable.
        """
        return t2 if t1 <= t2 else t1 if t2 <= t1 else None

    @classmethod
    def subt(cls, t1: T, t2: T) -> Optional[T]:
        """Return the infimum (smaller) of two types, or None if incomparable."""
        return t1 if t1 <= t2 else t2 if t2 <= t1 else None

    @property
    def base(self) -> bool:
        # Read-only accessor for the base/composite flag.
        return self.__base
@final
class Real(T):
    """Real number type (lazy singleton)."""

    __inst: Real = None

    def __init__(self) -> None:
        super().__init__(True)

    def __le__(self, other: T) -> bool:
        # Real may be widened to Cmplx and, like every type, to Sym.
        return type(other) in (Real, Cmplx, Sym)

    def __str__(self) -> str:
        return 'Real'

    @classmethod
    def inst(cls) -> Real:
        """Return the unique ``Real`` instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Real
        """
        if cls.__inst is None:
            cls.__inst = Real()

        return cls.__inst
@final
class Cmplx(T):
    """Complex number type (lazy singleton)."""

    __inst: Cmplx = None

    def __init__(self) -> None:
        super().__init__(True)

    def __le__(self, other: T) -> bool:
        # Cmplx is a subtype of itself and of Sym only.
        return type(other) in (Cmplx, Sym)

    def __str__(self) -> str:
        return 'Complex'

    @classmethod
    def inst(cls) -> Cmplx:
        """Return the unique ``Cmplx`` instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Cmplx
        """
        if cls.__inst is None:
            cls.__inst = Cmplx()

        return cls.__inst
@final
class Str(T):
    """String type (lazy singleton)."""

    __inst: Str = None

    def __init__(self) -> None:
        super().__init__(True)

    def __le__(self, other: T) -> bool:
        # Str is a subtype of itself and of Sym only.
        return type(other) in (Str, Sym)

    def __str__(self) -> str:
        return 'String'

    @classmethod
    def inst(cls) -> Str:
        """Return the unique ``Str`` instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Str
        """
        if cls.__inst is None:
            cls.__inst = Str()

        return cls.__inst
@final
class Bool(T):
    """Boolean type (lazy singleton)."""

    __inst: Bool = None

    def __init__(self) -> None:
        super().__init__(True)

    def __le__(self, other: T) -> bool:
        # Bool is a subtype of itself and of Sym only.
        return type(other) in (Bool, Sym)

    def __str__(self) -> str:
        return 'Bool'

    @classmethod
    def inst(cls) -> Bool:
        """Return the unique ``Bool`` instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Bool
        """
        if cls.__inst is None:
            cls.__inst = Bool()

        return cls.__inst
@final
class Sym(T):
    """Symbol type, the top of the subtype ordering (lazy singleton)."""

    __inst: Sym = None

    def __init__(self) -> None:
        super().__init__(True)

    def __le__(self, other: T) -> bool:
        # Sym is a subtype of nothing but itself.
        return type(other) is Sym

    def __str__(self) -> str:
        return 'Symbol'

    @classmethod
    def inst(cls) -> Sym:
        """Return the unique ``Sym`` instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Sym
        """
        if cls.__inst is None:
            cls.__inst = Sym()

        return cls.__inst
@final
class Void(T):
    """Void (no-value) type (lazy singleton)."""

    __inst: Void = None

    def __init__(self) -> None:
        super().__init__(True)

    def __le__(self, other: T) -> bool:
        # Void is a subtype of itself and of Sym only.
        return type(other) in (Void, Sym)

    def __str__(self) -> str:
        return 'Void'

    @classmethod
    def inst(cls) -> Void:
        """Return the unique ``Void`` instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Void
        """
        if cls.__inst is None:
            cls.__inst = Void()

        return cls.__inst
@final
class Tens(T):
    """Homogeneous tensor type: a ``fold``-dimensional array of ``chd_t``.

    ``dim`` stores the extent of each dimension; ``fold`` caches ``len(dim)``.
    """

    def __init__(self, chd_t: T, dim: List[int]) -> None:
        super().__init__(False)
        # Element (child) type shared by every entry of the tensor.
        self.__chd_t: T = chd_t
        # Extent of each dimension, outermost first.
        self.__dim: List[int] = dim
        # Tensor rank, cached from len(dim).
        self.__fold: int = len(dim)

    def __le__(self, other: T) -> bool:
        other_t: type = type(other)

        if other_t == Sym:
            # Every type is a subtype of Sym.
            return True
        elif other_t == Tens:
            # Covariant in the element type; ranks must agree.
            return self.__chd_t <= other.chd_t and len(self.__dim) == len(other.dim)
        elif other_t == Arr:
            # A homogeneous tensor may be used where an Arr of equal fold is
            # expected.
            return self.__chd_t <= other.chd_t and len(self.__dim) == other.fold
        else:
            return False

    def __eq__(self, other: T) -> bool:
        # Equality compares element type and rank only, NOT the extents.
        # NOTE: defining __eq__ without __hash__ makes Tens unhashable.
        return type(other) == Tens and self.__chd_t == other.chd_t and self.__fold == other.fold

    def __ne__(self, other: T) -> bool:
        return type(other) != Tens or self.__chd_t != other.chd_t or self.__fold != other.fold

    def __str__(self) -> str:
        return f'List of {self.__chd_t} ({self.__fold} fold)'

    @property
    def chd_t(self) -> T:
        return self.__chd_t

    @property
    def dim(self) -> List[int]:
        return self.__dim

    @property
    def fold(self) -> int:
        return self.__fold
@final
class Arr(T):
    """Ragged (possibly inhomogeneous) array type.

    ``chd_t`` is the common element type, ``fold`` the nesting depth, and
    ``dim`` optionally carries the per-element types (see
    ``ArrFact.get_arr_t``, which passes the child type list here).
    """

    def __init__(self, chd_t: T, fold: int, dim: List[T] = None) -> None:
        super().__init__(False)
        # Common (supremum) element type.
        self.__chd_t: T = chd_t
        # Nesting depth of the array.
        self.__fold: int = fold
        # Per-element types when known; may be None.
        self.__dim: List[T] = dim

    def __le__(self, other: T) -> bool:
        # Subtype of Sym, or of another Arr with covariant element type and
        # identical fold.
        return (type(other) == Sym) or \
               (type(other) == Arr and self.__chd_t <= other.chd_t and self.__fold == other.fold)

    def __eq__(self, other: T) -> bool:
        # Equality compares element type and fold only.
        # NOTE: defining __eq__ without __hash__ makes Arr unhashable.
        return type(other) == Arr and self.__chd_t == other.chd_t and self.__fold == other.fold

    def __ne__(self, other: T) -> bool:
        return type(other) != Arr or self.__chd_t != other.chd_t or self.__fold != other.fold

    def __str__(self) -> str:
        return f'List of {self.__chd_t} ({self.__fold} fold)'

    @property
    def chd_t(self) -> T:
        return self.__chd_t

    @property
    def dim(self) -> List[T]:
        return self.__dim

    @property
    def fold(self) -> int:
        return self.__fold
@final
class ArrFact:
    """Factory (singleton) that computes array types for literals and ops."""

    __inst: ArrFact = None

    @classmethod
    def inst(cls) -> ArrFact:
        """Return the singleton instance (lazy initialization).

        :return: Singleton object.
        :rtype: ArrFact
        """
        if not cls.__inst:
            cls.__inst = ArrFact()

        return cls.__inst

    def get_arr_t(self, chd_t: List[T]) -> Optional[T]:
        """Infer the type of an array literal from its element types.

        :param chd_t: Types of the literal's elements, in order.
        :return: The inferred array type, or None when the element types have
            no common supertype.
        """
        if not chd_t:
            # Empty literal: 1-fold tensor of Void with extent 0.
            return Tens(Void.inst(), [0])

        # Least common supertype of all element types.
        res_t: T = chd_t[0]

        for i in range(len(chd_t) - 1):
            res_t = T.supt(res_t, chd_t[i + 1])

            if not res_t:
                return None

        if res_t.base:
            if type(res_t) == Void:
                return Tens(Void.inst(), [0])
            elif type(res_t) == Sym:
                return res_t
            else:
                return Tens(res_t, [len(chd_t)])

        if type(res_t) == Arr:
            # Elements are already ragged arrays: wrap one fold deeper.
            return Arr(res_t.chd_t, res_t.fold, chd_t)
        else:
            if len(chd_t) == 1:
                return Tens(chd_t[0].chd_t, [1, *res_t.dim])

            # Homogeneous iff every element's dimensions equal the first's.
            homo: bool = all(d is not None for d in res_t.dim)
            # Fix: the scan previously started at i = 2, so chd_t[1]'s
            # dimensions were never compared; start at 1 to cover them all.
            i: int = 1

            while homo and i < len(chd_t):
                homo &= (chd_t[0].dim == chd_t[i].dim)
                i += 1

            return Tens(res_t.chd_t, [len(chd_t), *res_t.dim]) if homo else Arr(res_t.chd_t, len(res_t.dim) + 1, chd_t)

    def coerce_arr_t(self, src: T, chd_t: T) -> T:
        """Rebuild ``src``'s container type with element type ``chd_t``.

        :param src: Original container type (Tens or Arr).
        :param chd_t: New element type; a Sym element collapses the result to Sym.
        :return: Coerced container type.
        """
        if type(chd_t) == Sym:
            return chd_t
        else:
            return Tens(chd_t, src.dim) if type(src) == Tens else Arr(chd_t, src.fold, src.dim)

    def idx_arr_t(self, src: T) -> T:
        """Return the type produced by indexing ``src`` once (drops one fold).

        :param src: Container type being indexed (Tens or Arr).
        :return: Element type for a 1-fold container, otherwise a container
            with one fewer fold.
        """
        if type(src) == Tens:
            return src.chd_t if src.fold == 1 else Tens(src.chd_t, src.dim[1:])
        else:
            return Arr(src.chd_t, src.fold - 1)
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,523 | eik4862/TinyCalculator | refs/heads/master | /Function/Combination.py | from __future__ import annotations
from typing import final, Dict, Optional
from Core import Token, TypeSystem
from Function import Function
class CombFun(Function.Fun):
    """Base class of combinatorial function tokens.

    Marker classes in this hierarchy are never instantiated.
    """

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type check a combinatorial function call.

        NOTE(review): only the first child's type is inspected — confirm the
        remaining arguments (e.g. for Binom) are validated elsewhere.

        :param rt: Root token whose children are the call's arguments.
        :param t_env: Type environment, threaded through unchanged on success.
        :return: ``t_env`` on success, None on a type error.
        """
        t1: TypeSystem.T = rt.chd[0].t

        if not t1.base:
            return None

        if type(t1) == TypeSystem.Sym:
            rt.t = t1
        elif type(t1) == TypeSystem.Real:
            # Fix: assign the singleton *instance*, not the class object,
            # matching TypeSystem usage in Core/Interpreter.py.
            rt.t = TypeSystem.Real.inst()
        else:
            return None

        return t_env
@final
class Factorial(CombFun):
    """Combinatorial function marker: factorial. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class DoubleFactorial(CombFun):
    """Combinatorial function marker: double factorial. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Pochhammer(CombFun):
    """Combinatorial function marker: Pochhammer symbol. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Binom(CombFun):
    """Combinatorial function marker: binomial coefficient. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Multinom(CombFun):
    """Combinatorial function marker: multinomial coefficient. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Subfactorial(CombFun):
    """Combinatorial function marker: subfactorial. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class FacotirlaPower(CombFun):
    """Combinatorial function marker: factorial power. Never instantiated.

    TODO: class name is a typo for ``FactorialPower``; renaming would break
    external references, so it is only flagged here.
    """

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class AlternatingFactorial(CombFun):
    """Combinatorial function marker: alternating factorial. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class CatalanNum(CombFun):
    """Combinatorial function marker: Catalan number. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class PolygonNum(CombFun):
    """Combinatorial function marker: polygonal number. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Bell(CombFun):
    """Combinatorial function marker: Bell number. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Fibonacci(CombFun):
    """Combinatorial function marker: Fibonacci number. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Lucas(CombFun):
    """Combinatorial function marker: Lucas number. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Harmonic(CombFun):
    """Combinatorial function marker: harmonic number. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Stirling1(CombFun):
    """Combinatorial function marker: Stirling number of the 1st kind. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Stiling2(CombFun):
    """Combinatorial function marker: Stirling number of the 2nd kind. Never instantiated.

    TODO: class name is a typo for ``Stirling2``; renaming would break
    external references, so it is only flagged here.
    """

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,524 | eik4862/TinyCalculator | refs/heads/master | /Function/Division.py | from typing import final
from Function import Function
class DivFun(Function.Fun):
    """Base class of division-related function tokens. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Mod(DivFun):
    """Division function marker: modulo. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class PowerMod(DivFun):
    """Division function marker: modular exponentiation. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class Quotient(DivFun):
    """Division function marker: integer quotient. Never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,525 | eik4862/TinyCalculator | refs/heads/master | /Core/Interpreter.py | from __future__ import annotations
from typing import Dict, List
from Core import AST, Type, Token, TypeSystem
from Error import *
from Util import Printer
from Operator import *
class Interp:
"""
Type check AST and interpret it.
This class is implemented as singleton.
For the concept of singleton pattern, consult the references below.
**Reference**
* https://en.wikipedia.org/wiki/Singleton_pattern
:cvar __inst: Singleton object.
:ivar __expr: AST to be interpreted.
:ivar __line: Original user input string.
"""
__inst: Interp = None
# Global type environment, mapping variable ids to their inferred types.
# NOTE(review): {2: 3} maps an int to an int, not to a TypeSystem.T — this
# looks like leftover debug data; confirm the intended initial value.
__t_env: Dict[int, TypeSystem.T] = {2: 3}

def __init__(self) -> None:
    """Initialize an empty interpreter (no AST, empty input line)."""
    self.__expr: AST.AST = None
    self.__line: str = ''
def __chk_t(self) -> None:
    """
    Check type of AST.

    It just calls its helper ``Interp.__chk_t_hlpr``.
    For detailed description of type checking, refer to the comments in ``Interp.__chk_t_hlpr``.

    This method is private and called internally as the first step of interpreting chain.
    For detailed description for interpreting chain, refer to the comments of ``Interp.interp``.
    """
    # The helper threads the type environment through and returns it; the
    # returned environment is not needed here.
    self.__chk_t_hlpr(self.__expr.rt, self.__t_env)
def __chk_t_hlpr(self, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Dict[int, TypeSystem.T]:
    """
    Check type of partial AST.

    Usually, type checking is quite tricky logic.
    For example, Hindley-Milner let type system needs unification algorithms for type checking.
    But since grammar of math expression is simple, type checking logic is relatively simple.
    It just calls corresponding type checking methods by looking up the value of root token.
    After checking type, it assign inferred type of root token as its field value.
    For concept of Hindley-Milner let type system and unification algorithm, consult the references below.

    This method is private and called internally as a helper of ``Interp.__chk_t``.

    **Reference**
    * https://en.wikipedia.org/wiki/Hindley–Milner_type_system
    * https://en.wikipedia.org/wiki/Unification_(computer_science)

    :param rt: Root of partial AST to be typed checked.
    :type rt: Token.Tok
    :param t_env: Type environment threaded through the traversal.
    :type t_env: Dict[int, TypeSystem.T]

    :return: The (possibly updated) type environment.
    :rtype: Dict[int, TypeSystem.T]

    :raise InterpreterError.TErr: If an operator's type check fails.
    """
    tok_t: type = type(rt)

    if tok_t == Token.Num:
        # Numeric literal: complex values are Cmplx, anything else Real.
        rt.t = TypeSystem.Cmplx.inst() if type(rt.v) == complex else TypeSystem.Real.inst()

        return t_env
    elif tok_t == Token.Str:
        rt.t = TypeSystem.Str.inst()

        return t_env
    elif tok_t == Token.Bool:
        rt.t = TypeSystem.Bool.inst()

        return t_env
    elif tok_t == Token.Void:
        rt.t = TypeSystem.Void.inst()

        return t_env
    elif tok_t == Token.Var:
        # find: TypeSystem.T = t_env.get(rt.v)
        rt.t = TypeSystem.Sym.inst()

        return t_env
    elif tok_t == Token.List:
        # Type the children first, then infer the array type of the literal.
        chd_t: List[TypeSystem.T] = []

        for tok in rt.chd:
            t_env = self.__chk_t_hlpr(tok, t_env)
            chd_t.append(tok.t)

        res_t: TypeSystem.T = TypeSystem.ArrFact.inst().get_arr_t(chd_t)

        if not res_t:
            # TODO: raise a dedicated InterpreterError instead of a bare Exception.
            raise Exception('type error')

        rt.t = res_t

        return t_env
    elif tok_t == Token.Op:
        for tok in rt.chd:
            t_env = self.__chk_t_hlpr(tok, t_env)

        # NOTE(review): an *empty* dict is also falsy — confirm chk_t signals
        # failure with None only.
        t_env = rt.v.chk_t(rt, t_env)

        if not t_env:
            raise InterpreterError.TErr(23, *self.__expr.str_pos(rt), rt, rt.v.sgn(), rt.v.__name__.upper())

        return t_env
    else:
        for tok in rt.chd:
            t_env = self.__chk_t_hlpr(tok, t_env)

        # Fix: use the singleton accessor instead of constructing a fresh Sym,
        # consistent with every other TypeSystem usage in this method.
        rt.t = TypeSystem.Sym.inst()

        return t_env
def __debug_hlpr(self, rt: Token.Tok, cnt: int) -> int:
    """Pretty-print the inferred type of each subtree to the debug buffer.

    Children are printed (post-order) before the root, each entry labelled
    with a running counter.

    :param rt: Root of the partial AST to report.
    :type rt: Token.Tok
    :param cnt: Counter labelling the next entry.
    :type cnt: int

    :return: Counter value after this subtree has been printed.
    :rtype: int
    """
    buf: Type.BufT = Type.BufT.DEBUG  # Debug buffer.
    tok_t: type = type(rt)

    if tok_t == Token.Op:
        for tok in rt.chd:
            cnt = self.__debug_hlpr(tok, cnt)

        if rt.v in [Unary.Plus, Unary.Minus, Bool.Neg]:
            # Prefix unary operator.
            t_str: str = f'{rt.v.sym()}{rt.chd[0].t} -> {rt.t}'
        elif rt.v == Unary.Trans:
            # Postfix unary operator.
            t_str: str = f'{rt.chd[0].t}{rt.v.sym()} -> {rt.t}'
        elif rt.v == Delimiter.Seq:
            if rt.argc == 2:
                t_str: str = f'{rt.chd[0].t}:{rt.chd[1].t} -> {rt.t}'
            else:
                # Fix: the 3-argument sequence printed chd[1] twice; the last
                # slot is the third child.
                t_str: str = f'{rt.chd[0].t}:{rt.chd[1].t}:{rt.chd[2].t} -> {rt.t}'
        elif rt.v == Delimiter.Idx:
            t_str: str = f'{rt.chd[0].t}[' + ', '.join([str(tok.t) for tok in rt.chd[1:]]) + f'] -> {rt.t}'
        else:
            # Infix binary operator.
            t_str: str = f'{rt.chd[0].t} {rt.v.sym()} {rt.chd[1].t} -> {rt.t}'
    elif tok_t == Token.Fun:
        for tok in rt.chd:
            cnt = self.__debug_hlpr(tok, cnt)

        t_str: str = f'{rt.v_str()}[' + ', '.join([str(tok.t) for tok in rt.chd]) + f'] -> {rt.t}'
    elif tok_t == Token.List:
        for tok in rt.chd:
            cnt = self.__debug_hlpr(tok, cnt)

        t_str: str = '{' + ', '.join([str(tok.t) for tok in rt.chd]) + '}' + f' -> {rt.t}'
    else:
        t_str: str = str(rt.t)

    Printer.Printer.inst().buf(f'[{cnt}]', buf, indent=4)
    Printer.Printer.inst().buf(f'@partial AST: {AST.AST(rt)}', buf, indent=6)
    Printer.Printer.inst().buf(f'@inferred : {t_str}', buf, indent=6)
    Printer.Printer.inst().buf_newline(buf)

    return cnt + 1
# def __simplify(self) -> None:
# """
# Simplify AST.
#
# It just calls its helper ``Interp.__simplify_hlpr``.
# For detailed description of simplification, refer to the comments in ``Interp.__simplify_hlpr``.
#
# This method is private and called internally as the second step of interpreting chain.
# For detailed description for interpreting chain, refer to the comments of ``Interp.interp``.
# """
# self.__expr.rt = self.__simplify_hlpr(self.__expr.rt, None)
#
# def __simplify_hlpr(self, rt: Token.Tok, prn: Token.Tok) -> Token.Tok:
# """
# Simplify partial AST.
#
# It does following simplifications.
# 1. Constant folding.
# 2. Sign propagation.
# 3. Dead expression stripping.
# 4. Hoisting.
# 5. Packing.
# 6. Unpacking.
# 7. Function coalescing.
# Most of these simplification tricks are originally part of compiler optimization, virtual memory management, and
# programing language scheme.
# It just calls corresponding simplifying methods by looking up the value of root token.
# For detailed description and examples of each trick, consult following references and comments of
# ``Op.simplify`` and ``Tri.simplify``.
#
# This method is private and called internally as a helper of ``Interp.__simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Constant_folding
# * https://en.wikipedia.org/wiki/Dead_code_elimination
# * https://developer.mozilla.org/ko/docs/Glossary/Hoisting
# * https://en.wikipedia.org/wiki/Coalescing_(computer_science)
#
# :param rt: Root of partial AST to be simplified.
# :type rt: Token.Tok
# :param prn: Parent of root of partial AST to be simplified.
# :type prn: Token.Tok
#
# :return: Root of simplified partial AST.
# :rtype: Token.Tok
# """
# if rt.tok_t in [Type.TokT.NUM, Type.TokT.VAR, Type.TokT.STR, Type.TokT.VOID]:
# return rt
# elif rt.tok_t == Type.TokT.OP:
# rt.chd = [self.__simplify_hlpr(tok, rt) for tok in rt.chd]
# simple, warn = Operator.Op.pck(rt, prn)
#
# for it in warn:
# WarningManager.WarnManager.inst().push(it)
#
# return simple
# elif rt.tok_t == Type.TokT.FUN:
# rt.chd = [self.__simplify_hlpr(tok, rt) for tok in rt.chd]
#
# if rt.v in [Type.FunT.Sin, Type.FunT.Cos, Type.FunT.Tan, Type.FunT.Csc, Type.FunT.Sec, Type.FunT.Cot,
# Type.FunT.ArcSin, Type.FunT.ArcCos, Type.FunT.ArcTan, Type.FunT.ArcCsc, Type.FunT.ArcSec,
# Type.FunT.ArcCot]:
# simple, warn = Trigonometric.TriFun.simplify(rt)
# elif rt.v in [Type.FunT.Sinh, Type.FunT.Cosh, Type.FunT.Tanh, Type.FunT.Csch, Type.FunT.Sech,
# Type.FunT.Coth, Type.FunT.ArcSinh, Type.FunT.ArcCosh, Type.FunT.ArcTanh, Type.FunT.ArcCsch,
# Type.FunT.ArcSech, Type.FunT.ArcCoth]:
# simple, warn = Hyperbolic.HypbolicFunc.simplify(rt)
# elif rt.v in [Type.FunT.Exp, Type.FunT.Log, Type.FunT.Pow, Type.FunT.Sqrt, Type.FunT.Log2, Type.FunT.Log10]:
# simple, warn = Exponential.ExpFun.simplify(rt)
# else:
# simple, warn = SpecialFunction.SpecialFun.simplify(rt)
#
# for it in warn:
# WarningManager.WarnManager.inst().push(it)
#
# return simple
# else:
# rt.chd = [self.__simplify_hlpr(tok, rt) for tok in rt.chd]
#
# return rt
#
# def __eval(self) -> bool:
# """
# Evaluate AST and return whether further evaluation is needed.
#
# It just calls its helper ``Interp.__eval_hlpr``.
# For detailed description of evaluation, refer to the comments in ``Interp.__eval_hlpr``.
#
# This method is private and called internally as the fourth of interpreting chain.
# For detailed description for interpreting chain, refer to the comments of ``Interp.interp``.
#
# :return: True if there is need of further evaluation. False otherwise.
# :rtype: bool
# """
# evaled, done = self.__eval_hlpr(self.__expr.rt)
# self.__expr.rt = evaled
#
# return done
#
# def __eval_hlpr(self, rt: Token.Tok) -> Tuple[Token.Tok, bool]:
# """
# Evaluate partial AST and return whether further evaluation is needed.
#
# It just calls corresponding evaluation methods by looking up the value of root token.
# Note that it only evaluates command at the lowest level of AST, not all.
#
# This method is private and called internally as a helper of ``Interp.__eval``.
#
# :param rt: Root of partial AST to be evaluated.
# :type rt: Token.Tok
#
# :return: Root of evaluated partial AST and flag for further evaluation.
# The flag is true if there is need of further evaluation, and false otherwise.
# :rtype: Tuple[Token.Tok, bool]
# """
# if rt.tok_t in [Type.TokT.NUM, Type.TokT.VAR, Type.TokT.STR, Type.TokT.VOID]:
# return rt, False
# elif rt.tok_t in [Type.TokT.OP, Type.TokT.FUN]:
# done: bool = False
#
# for i in range(len(rt.chd)):
# evaled, tmp = self.__eval_hlpr(rt.chd[i])
# done |= tmp
# rt.swap_chd(evaled, i)
#
# return rt, done
# else:
# done: bool = False
#
# for i in range(len(rt.chd)):
# evaled, tmp = self.__eval_hlpr(rt.chd[i])
# done |= tmp
# rt.swap_chd(evaled, i)
#
# if done:
# return rt, done
#
# if rt.v in [Type.CmdT.HELP, Type.CmdT.QUIT, Type.CmdT.SET_SYS_VAR, Type.CmdT.GET_SYS_VAR, Type.CmdT.SLEEP]:
# evaled, warn = Utility.Util.eval(rt)
#
# for it in warn:
# WarningManager.WarnManager.inst().push(it)
#
# return evaled, True
@classmethod
def inst(cls) -> Interp:
"""
Getter for singleton object.
If it is the first time calling this, it initializes the singleton objects.
This automatically supports so called lazy initialization.
:return: Singleton object.
:rtype: Interp
"""
if not cls.__inst:
cls.__inst = Interp()
return cls.__inst
def interp(self, expr: AST.AST, debug: bool = False) -> AST.AST:
    """
    Type check AST and interpret it.

    Interpreting is recursive procedure comprised of 5 steps.
        1. Type check AST.
           Run type checker on AST, filtering type errors before evaluating it.
        2. Simplify AST.
           After simplification, AST becomes much simpler, lightening burden of evaluation.
        3. Check whether further evaluation is needed.
           If further evaluation is not needed, interpreting chain stops here, returning fully interpreted AST.
           Note that returning AST may not be single NUM token.
        4. Evaluate AST partially.
           With simplified AST, it evaluates CMD tokens in AST partially.
           Here, partially means that it only evaluates CMD tokens at the lowest level of AST, not all of them.
           This is because after partial evaluation, further simplification may be possible.
        5. Move to step 2 and repeat.
    By step 5, there is a danger of infinite loop.
    Further, evaluation may tak very long time.
    Thus there is a timeout limit of this recursive loop, which is defined as system variable
    ``Computation_Timeout``.
    This timeout limit can be customized using ``Set_sys_var`` command.

    This method supports brief summary outputs which can be used for debugging or generation of debug set.

    NOTE(review): steps 2-5 are currently commented out below, so the active
    code only type checks and implicitly returns None, not the documented
    interpreted AST — confirm whether this is intentional work-in-progress.

    :param expr: AST to be interpreted.
    :type expr: AST.AST
    :param debug: Flag for debug mode. (Default: False)
    :type debug: bool

    :return: Interpreted AST.
    :rtype: AST.AST
    """
    # Remember the interpreting target and its source line for error reporting.
    self.__expr = expr
    self.__line = expr.line

    if debug:
        buf: Type.BufT = Type.BufT.DEBUG  # Debug buffer.

        # Print out interpreting target.
        Printer.Printer.inst().buf(Printer.Printer.inst().f_title('interpreting target'), buf)
        Printer.Printer.inst().buf(f'@AST: {expr}', buf, indent=2)
        Printer.Printer.inst().buf_newline(buf)
        Printer.Printer.inst().buf(Printer.Printer.inst().f_title('interpreting chain'), buf)
        Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running type checker'), buf, False, 2)

        # Run type checker.
        try:
            self.__chk_t()
        except Error.InterpErr as interpreter_err:
            Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
            Printer.Printer.inst().buf_newline(buf)

            raise interpreter_err
        else:
            Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
            self.__debug_hlpr(expr.rt, 0)
            Printer.Printer.inst().buf_newline(buf)
        #
        # iter: int = 1  # Interpretation loop counter.
        #
        # try:
        #     with SystemManager.timeout(SystemManager.SysManager.inst().get_sys_var('Computation_Timeout').v):
        #         while True:
        #             # Run simplifier.
        #             Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running simplifier'), buf, False, 2)
        #
        #             try:
        #                 self.__simplify()
        #             except Error.InterpErr as interpreter_err:
        #                 Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
        #                 Printer.Printer.inst().buf_newline(buf)
        #
        #                 raise interpreter_err
        #             else:
        #                 Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
        #
        #             Printer.Printer.inst().buf(f'@simplified: {self.__expr.infix()}', buf, indent=4)
        #             Printer.Printer.inst().buf_newline(buf)
        #
        #             # Evaluate.
        #             Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Evaluating AST'), buf, False, 2)
        #
        #             try:
        #                 cont: bool = self.__eval()
        #             except Error.UtilErr as util_err:
        #                 if util_err.t == Type.UtilErrT.QUIT:
        #                     Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
        #                 else:
        #                     Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
        #
        #                 Printer.Printer.inst().buf_newline(buf)
        #                 raise util_err
        #             except Error.Err as err:
        #                 Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
        #                 Printer.Printer.inst().buf_newline(buf)
        #
        #                 raise err
        #             else:
        #                 Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
        #
        #             Printer.Printer.inst().buf(f'@evaluated: {self.__expr.infix()}', buf, indent=4)
        #
        #             if not cont:
        #                 # If there is no need of further evaluation, return.
        #
        #                 Printer.Printer.inst().buf('@continue : False', buf, indent=4)
        #                 Printer.Printer.inst().buf(f'@iter : {iter}', buf, indent=4)
        #                 Printer.Printer.inst().buf_newline(buf)
        #
        #                 return self.__expr
        #
        #             Printer.Printer.inst().buf('@continue : True', buf, indent=4)
        #             Printer.Printer.inst().buf_newline(buf)
        #
        #             iter += 1
        #
        # except Error.SysErr as sys_err:
        #     # In case of timeout, clear global data structures manipulated so far to avoid async errors.
        #     Printer.Printer.inst().clr(Type.BufT.DEBUG)
        #     Printer.Printer.inst().clr(Type.BufT.INTERNAL)
        #     WarningManager.WarnManager.inst().clr()
        #     sys_err.iter = iter
        #     sys_err.err_no = 24
        #
        #     raise sys_err
    else:
        # Non-debug path: type check only (see NOTE in the docstring).
        self.__chk_t()
        # try:
        #     with SystemManager.timeout(SystemManager.SysManager.inst().get_sys_var('Computation_Timeout').v):
        #         iter: int = 1  # Interpretation loop counter.
        #
        #         while True:
        #             self.__simplify()
        #
        #             if not self.__eval():
        #                 return self.__expr
        #
        #             iter += 1
        # except Error.SysErr as sys_err:
        #     sys_err.iter = iter
        #     sys_err.err_no = 24
        #
        #     raise sys_err
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,526 | eik4862/TinyCalculator | refs/heads/master | /Operator/Delimiter.py | from __future__ import annotations
from sys import maxsize
from typing import final, Final, Tuple, Dict, Optional, List
from Core import Token, TypeSystem
from Operator import Operator
class DelimOp(Operator.Op):
    """Common base for delimiter operator toolboxes.

    Delimiter classes are pure static toolboxes: they expose classmethods
    only and must never be instantiated.
    """

    def __new__(cls, *args, **kwargs) -> None:
        # Instantiation is forbidden for all delimiter toolboxes.
        raise NotImplementedError
@final
class Lpar(DelimOp):
    """Toolbox for the left parenthesis delimiter ``(``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = '('

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class Rpar(DelimOp):
    """Toolbox for the right parenthesis delimiter ``)``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = ')'

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class SqrLpar(DelimOp):
    """Toolbox for the left square bracket delimiter ``[``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = '['

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class SqrRpar(DelimOp):
    """Toolbox for the right square bracket delimiter ``]``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = ']'

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class CrlLpar(DelimOp):
    """Toolbox for the left curly brace delimiter ``{``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = '{'

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class CrlRpar(DelimOp):
    """Toolbox for the right curly brace delimiter ``}``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = '}'

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class Com(DelimOp):
    """Toolbox for the comma delimiter ``,``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    __SYM: Final[str] = ','

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this delimiter."""
        return cls.__SYM
@final
class Seq(DelimOp):
    """Toolbox for the sequence operator ``:`` (e.g. ``a:b``)."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (16, 15)
    __SYM: Final[str] = ':'
    # Human-readable candidate signatures, used in error reporting.
    __SGN: Final[List[str]] = ['Real:Real -> List of Real (1 fold)',
                               'Real:Real:Real -> List of Real (1 fold)',
                               'Sym:Sym -> Sym',
                               'Sym:Sym:Sym -> Sym']
    __ARGC: Final[int] = 2

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of candidate signatures."""
        return cls.__SGN

    @classmethod
    def argc(cls) -> int:
        """Return the arity of the sequence operator."""
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check a sequence expression rooted at ``rt``.

        On success the inferred type is written to ``rt.t`` and ``t_env`` is
        returned; ``None`` signals a type error.
        """
        lhs_t: TypeSystem.T = rt.chd[0].t
        rhs_t: TypeSystem.T = rt.chd[1].t

        # Both operands must have base (non-array) types.
        if not (lhs_t.base and rhs_t.base):
            return None

        sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t, rhs_t)

        if type(sup_t) == TypeSystem.Real:
            # Real:Real yields a 1-fold list of Real with unknown length.
            rt.t = TypeSystem.Tens(sup_t, [None])
            return t_env

        if type(sup_t) == TypeSystem.Sym:
            # Symbolic operands keep the expression symbolic.
            rt.t = sup_t
            return t_env

        return None
@final
class Idx(DelimOp):
    """Toolbox for the indexing operator (internal symbol ``IDX``)."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (22, 21)
    __SYM: Final[str] = 'IDX'
    # Human-readable candidate signatures, used in error reporting.
    __SGN: Final[List[str]] = ['Sym[Any] -> Sym',
                               'List of Any (1 fold) [Real] -> Any',
                               'List of Any (n fold) [Real] -> List of Any (n - 1 fold) given that n > 1',
                               'List of Any (n fold) [List of Real (1 fold)] -> List of Any (n fold)',
                               'List of Any (n fold) [List of Bool (1 fold)] -> List of Any (n fold)',
                               'List of Any[Sym] -> Sym',
                               'Sym[Any, ..., Any] = Sym[Any]...[Any]',
                               'List of Any (n fold)[Any, ..., Any] = List of Any[Any]...[Any] given that # of idx <= n']

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the internal symbol of this operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of candidate signatures."""
        return cls.__SGN

    @classmethod
    def __chk_t_hlpr(cls, src_t: TypeSystem.T, idx_t: TypeSystem.T) -> Optional[TypeSystem.T]:
        """Apply a single index of type ``idx_t`` to a source of type ``src_t``.

        Returns the resulting type, or ``None`` when the combination is not
        a valid indexing step.
        """
        if src_t.base:
            # A base source is only indexable when it is symbolic.
            return src_t if type(src_t) == TypeSystem.Sym else None

        if idx_t.base:
            if type(idx_t) == TypeSystem.Real:
                # A scalar index peels one fold off the array.
                return TypeSystem.ArrFact.inst().idx_arr_t(src_t)
            if type(idx_t) == TypeSystem.Sym:
                # A symbolic index makes the result symbolic.
                return idx_t
            return None

        # Array index: only a 1-fold list of Real/Bool selects, preserving
        # the source type.
        if type(idx_t.chd_t) in [TypeSystem.Real, TypeSystem.Bool] and idx_t.fold == 1:
            return src_t
        return None

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check an indexing expression rooted at ``rt``.

        Child 0 is the indexed source; the remaining children are indices.
        On success the inferred type is written to ``rt.t`` and ``t_env`` is
        returned; ``None`` signals a type error.
        """
        cur_t: TypeSystem.T = rt.chd[0].t

        # Any symbolic operand makes the whole expression symbolic.
        if any(type(child.t) == TypeSystem.Sym for child in rt.chd):
            rt.t = TypeSystem.Sym.inst()
            return t_env

        # Non-symbolic base sources are not indexable, and the array must
        # have at least as many folds as there are indices.
        if cur_t.base or cur_t.fold < rt.argc - 1:
            return None

        # Apply indices one by one, left to right.
        for idx_tok in rt.chd[1:]:
            cur_t = cls.__chk_t_hlpr(cur_t, idx_tok.t)
            if not cur_t:
                return None

        rt.t = cur_t
        return t_env
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,527 | eik4862/TinyCalculator | refs/heads/master | /Function/Function.py | from sys import maxsize
from typing import Final, Tuple, List
class Fun:
    """Common base for the function toolboxes.

    Exposes the shared precedence pair and a signature-list hook that
    concrete function classes can shadow. Never instantiated.
    """

    # Precedence pair shared by all functions: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (0, maxsize)
    # Candidate signature list; the base class has none.
    __SGN: List[str] = None

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the candidate signature list (``None`` on the base class)."""
        return cls.__SGN
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,528 | eik4862/TinyCalculator | refs/heads/master | /Operator/Bool.py | from __future__ import annotations
from typing import final, Final, Tuple, Dict, Optional, List
from Core import Token, TypeSystem
from Operator import Operator
class BoolOp(Operator.Op):
    """Common base for boolean operator toolboxes.

    Supplies the default binary arity and the shared type checker used by
    the elementwise boolean operators (``And``/``Or``/``Xor``).
    """

    # Default arity; unary subclasses (e.g. ``Neg``) shadow this.
    __ARGC: int = 2

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def argc(cls) -> int:
        """Return the arity of this operator."""
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check a binary boolean expression rooted at ``rt``.

        Scalars broadcast against arrays; on success the inferred type is
        written to ``rt.t`` and ``t_env`` is returned, otherwise ``None``.
        """
        lhs_t: TypeSystem.T = rt.chd[0].t
        rhs_t: TypeSystem.T = rt.chd[1].t

        if lhs_t.base and rhs_t.base:
            # scalar op scalar
            sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t, rhs_t)
            if type(sup_t) not in [TypeSystem.Bool, TypeSystem.Sym]:
                return None
            rt.t = sup_t
            return t_env

        if lhs_t.base:
            # scalar op array: broadcast the scalar over the array.
            sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t, rhs_t.chd_t)
            if type(sup_t) not in [TypeSystem.Bool, TypeSystem.Sym]:
                return None
            rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(rhs_t, sup_t)
            return t_env

        if rhs_t.base:
            # array op scalar: broadcast the scalar over the array.
            sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t.chd_t, rhs_t)
            if type(sup_t) not in [TypeSystem.Bool, TypeSystem.Sym]:
                return None
            rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(lhs_t, sup_t)
            return t_env

        # array op array: supertype must exist and hold Bool elements.
        sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t, rhs_t)
        if not (sup_t and type(sup_t.chd_t) == TypeSystem.Bool):
            return None
        rt.t = sup_t
        return t_env
@final
class Neg(BoolOp):
    """Toolbox for the logical negation operator ``!``."""

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (19, 20)
    __SYM: Final[str] = '!'
    # Human-readable candidate signatures, used in error reporting.
    __SGN: Final[List[str]] = ['!Bool -> Bool',
                               '!Sym -> Sym',
                               '!List of Bool (n fold) -> List of Bool (n fold)']
    __ARGC: Final[int] = 1

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of candidate signatures."""
        return cls.__SGN

    @classmethod
    def argc(cls) -> int:
        """Return the arity (negation is unary)."""
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check a negation expression rooted at ``rt``.

        Negation is type-preserving per ``__SGN``: ``!Bool -> Bool``,
        ``!Sym -> Sym`` and ``!List of Bool (n fold)`` keeps its fold.
        On success the inferred type is written to ``rt.t`` and ``t_env``
        is returned; ``None`` signals a type error.

        Bug fix: the previous implementation assigned ``TypeSystem.Bool.inst()``
        on every successful path, collapsing ``!Sym`` and ``!List of Bool``
        results to plain ``Bool`` — contradicting both ``__SGN`` above and the
        type preservation performed by ``BoolOp.chk_t`` for the sibling
        operators. The operand type is now propagated unchanged.
        """
        t: TypeSystem.T = rt.chd[0].t
        if t.base:
            if type(t) not in [TypeSystem.Bool, TypeSystem.Sym]:
                return None
            # Bool -> Bool, Sym -> Sym.
            rt.t = t
        else:
            if type(t.chd_t) != TypeSystem.Bool:
                return None
            # List of Bool (n fold) -> List of Bool (n fold).
            rt.t = t
        return t_env
@final
class And(BoolOp):
    """Toolbox for the logical conjunction operator ``&``.

    Type checking is inherited from ``BoolOp.chk_t``.
    """

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (6, 5)
    __SYM: Final[str] = '&'
    # Human-readable candidate signatures, used in error reporting.
    __SGN: Final[List[str]] = ['Bool & Bool -> Bool',
                               'Sym & Sym -> Sym',
                               'Bool & List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) & Bool -> List of Bool (n fold)',
                               'List of Bool (n fold) & List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of candidate signatures."""
        return cls.__SGN
@final
class Or(BoolOp):
    """Toolbox for the logical disjunction operator ``|``.

    Type checking is inherited from ``BoolOp.chk_t``.
    """

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (4, 3)
    __SYM: Final[str] = '|'
    # Human-readable candidate signatures, used in error reporting.
    __SGN: Final[List[str]] = ['Bool | Bool -> Bool',
                               'Sym | Sym -> Sym',
                               'Bool | List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) | Bool -> List of Bool (n fold)',
                               'List of Bool (n fold) | List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of candidate signatures."""
        return cls.__SGN
@final
class Xor(BoolOp):
    """Toolbox for the exclusive-or operator ``^``.

    Type checking is inherited from ``BoolOp.chk_t``.
    """

    # Precedence pair: (in value, out value).
    __PRECD: Final[Tuple[int, int]] = (4, 3)
    __SYM: Final[str] = '^'
    # Human-readable candidate signatures, used in error reporting.
    __SGN: Final[List[str]] = ['Bool ^ Bool -> Bool',
                               'Sym ^ Sym -> Sym',
                               'Bool ^ List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) ^ Bool -> List of Bool (n fold)',
                               'List of Bool (n fold) ^ List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the first element of the precedence pair."""
        in_prec, _ = cls.__PRECD
        return in_prec

    @classmethod
    def precd_out(cls) -> int:
        """Return the second element of the precedence pair."""
        _, out_prec = cls.__PRECD
        return out_prec

    @classmethod
    def sym(cls) -> str:
        """Return the literal symbol of this operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of candidate signatures."""
        return cls.__SGN
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,529 | eik4862/TinyCalculator | refs/heads/master | /Error/InterpreterError.py | from __future__ import annotations
from typing import final, List
from Core import Token
from Error import Error
from Operator import *
@final
class TErr(Error.InterpErr):
    """Type error raised by the interpreter when no operator signature matches.

    Builds a human-readable rendering of the offending signature
    (``err_sgn``) from the operator token and the types of its children,
    and keeps the candidate signatures (``cand_sgn``) for error reporting.
    """

    def __init__(self, errno: int, line: str, pos: int, err_tok: Token.Tok, cand_sgn: List[str], handle: str) -> None:
        # errno/line/pos are forwarded to the generic interpreter-error base.
        super().__init__(errno, line, pos)
        # Candidate signatures of the operator, shown to the user as hints.
        self.__cand_sgn: List[str] = cand_sgn
        # Handle (display name) of the failing operator/function.
        self.__handle: str = handle
        # Render the inferred (mismatching) signature according to the
        # operator's fixity. Branch order matters: specific operator classes
        # are checked before falling through to the generic infix form.
        # NOTE(review): when err_tok is NOT a Token.Op, __err_sgn is never
        # assigned, so the err_sgn property would raise AttributeError —
        # presumably TErr is only ever constructed with operator tokens;
        # confirm against callers.
        if type(err_tok) == Token.Op:
            if err_tok.v in [Unary.Plus, Unary.Minus, Bool.Neg]:
                # Prefix unary: "<sym><operand type>", e.g. "-Real".
                self.__err_sgn: str = f'{err_tok.v.sym()}{err_tok.chd[0].t}'
            elif err_tok.v == Unary.Trans:
                # Postfix unary (transpose): "<operand type><sym>".
                self.__err_sgn: str = f'{err_tok.chd[0].t}{err_tok.v.sym()}'
            elif err_tok.v == Delimiter.Seq:
                # Sequence: "<lhs type>:<rhs type>".
                self.__err_sgn: str = f'{err_tok.chd[0].t}:{err_tok.chd[1].t}'
            elif err_tok.v == Delimiter.Idx:
                # Indexing: "<src type>[<idx type>, ...]".
                self.__err_sgn: str = f'{err_tok.chd[0].t}[' + ', '.join([str(tok.t) for tok in err_tok.chd[1:]]) + f']'
            else:
                # Generic binary infix: "<lhs type> <sym> <rhs type>".
                self.__err_sgn: str = f'{err_tok.chd[0].t} {err_tok.v.sym()} {err_tok.chd[1].t}'

    @property
    def err_sgn(self) -> str:
        """Rendered signature that failed to type-check."""
        return self.__err_sgn

    @property
    def cand_sgn(self) -> List[str]:
        """Candidate signatures of the offending operator."""
        return self.__cand_sgn

    @property
    def handle(self) -> str:
        """Display name of the offending operator/function."""
        return self.__handle
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,530 | eik4862/TinyCalculator | refs/heads/master | /Function/Trigonometric.py | from __future__ import annotations
from typing import List, final, Final, Optional, Dict
from Function import Function
from Core import Token, TypeSystem
class TriFun(Function.Fun):
    """
    Trigonometric function toolbox.

    Supplies the type checker shared by the trigonometric functions;
    never instantiated.
    """

    def __new__(cls) -> None:
        # Static toolbox: never instantiated.
        raise NotImplementedError

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check a trigonometric call rooted at ``rt``.

        The single argument must be ``Real`` or ``Sym``; the call's type is
        the argument's type. Returns ``t_env`` on success, ``None`` on a
        type error.
        """
        arg_t = rt.chd[0].t
        if type(arg_t) not in [TypeSystem.Real, TypeSystem.Sym]:
            return None
        rt.t = arg_t
        return t_env
# @classmethod
# def __sin(cls, x: float) -> float:
# """
# Sine function.
#
# Sine function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite, the result is ``math.sin(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where sine function is to be computed.
# :type x: float
#
# :return: Computed value of sine function.
# :rtype: float
# """
# return math.nan if math.isinf(x) else math.sin(x)
#
# @classmethod
# def __cos(cls, x: float) -> float:
# """
# Cosine function.
#
# Cosine function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite, the result is ``math.cos(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where cosine function is to be computed.
# :type x: float
#
# :return: Computed value of cosine function.
# :rtype: float
# """
# return math.nan if math.isinf(x) else math.cos(x)
#
# @classmethod
# def __tan(cls, x: float) -> float:
# """
# Tangent function.
#
# Tangent function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is (integer multiple of pi)+pi/2, the result is nan.
# 3. If x is finite which is not (integer multiple of pi)+pi/2, the result is ``math.tan(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where tangent function is to be computed.
# :type x: float
#
# :return: Computed value of tangent function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or (x - math.pi / 2) % math.pi == 0 else math.tan(x)
#
# @classmethod
# def __csc(cls, x: float) -> float:
# """
# Cosecant function.
#
# Cosecant function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is integer multiple of pi, the result is nan.
# 3. If x is finite which is not integer multiple of pi, the result is ``1 / math.sin(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where cosecant function is to be computed.
# :type x: float
#
# :return: Computed value of cosecant function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or x % math.pi == 0 else 1 / math.sin(x)
#
# @classmethod
# def __sec(cls, x: float) -> float:
# """
# Secant function.
#
# Secant function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is (integer multiple of pi)+pi/2, the result is nan.
# 3. If x is finite which is not (integer multiple of pi)+pi/2, the result is ``1 / math.cos(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where secant function is to be computed.
# :type x: float
#
# :return: Computed value of secant function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or (x - math.pi / 2) % math.pi == 0 else 1 / math.cos(x)
#
# @classmethod
# def __cot(cls, x: float) -> float:
# """
# Cotangent function.
#
# Secant function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is integer multiple of pi, the result is nan.
# 3. If x is finite which is not integer multiple of pi, the result is ``1 / math.tan(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where cotangent function is to be computed.
# :type x: float
#
# :return: Computed value of cotangent function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or x % math.pi == 0 else 1 / math.tan(x)
#
# @classmethod
# def __asin(cls, x: float) -> float:
# """
# Arcsine function.
#
# Arcsine function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is not in [-1, 1], the result is nan.
# 3. If x is finite which is in [-1. 1], the result is ``math.asin(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where arcsine function is to be computed.
# :type x: float
#
# :return: Computed value of arcsine function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or not (-1 <= x <= 1) else math.asin(x)
#
# @classmethod
# def __acos(cls, x: float) -> float:
# """
# Arccosine function.
#
# Arccosine function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is not in [-1, 1], the result is nan.
# 3. If x is finite which is in [-1. 1], the result is ``math.acos(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where arccosine function is to be computed.
# :type x: float
#
# :return: Computed value of arccosine function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or not (-1 <= x <= 1) else math.acos(x)
#
# @classmethod
# def __atan(cls, x: float) -> float:
# """
# Arctangent function.
#
# Arctangent function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is +-pi/2, resp.
# 3. If x is finite, the result is ``math.atan(x)``.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# :param x: Point where arctangent function is to be computed.
# :type x: float
#
# :return: Computed value of arctangent function.
# :rtype: float
# """
# return math.atan(x)
#
# @classmethod
# def __acsc(cls, x: float) -> float:
# """
# Arccosecant function.
#
# Arccosecant function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is in (-1, 1), the result is nan.
# 4. If x is finite which is not in (-1, 1), the result is ``math.asin(1 / x)``.
# Here, the rule 4 is based on identity ``asec(x) = acos(1 / x)``.
# For detail and more identities, consult the reference below.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Inverse_trigonometric_functions
#
# :param x: Point where arccosecant function is to be computed.
# :type x: float
#
# :return: Computed value of arccosecant function.
# :rtype: float
# """
# return math.nan if -1 < x < 1 else math.asin(1 / x)
#
# @classmethod
# def __asec(cls, x: float) -> float:
# """
# Arcsecant function.
#
# Arcsecant function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is pi/2.
# 3. If x is in (-1, 1), the result is nan.
# 4. If x is finite which is not in (-1, 1), the result is ``math.acos(1 / x)``.
# Here, the rule 4 is based on identity ``asec(x) = acos(1 / x)``.
# For detail and more identities, consult the reference below.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Inverse_trigonometric_functions
#
# :param x: Point where arcsecant function is to be computed.
# :type x: float
#
# :return: Computed value of arcsecant function.
# :rtype: float
# """
# return math.nan if -1 < x < 1 else math.acos(1 / x)
#
# @classmethod
# def __acot(cls, x: float) -> float:
# """
# Arccotangent function.
#
# Arccotangent function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is finite negative, the result is ``-(math.pi / 2 + math.atan(x))``.
# 4. If x is nonnegative, the result is ``math.pi / 2 - math.atan(x)``.
# Here, the rule 3 and 4 are based on identity ``acot(x) = -pi / 2 - atan(x)`` for negative x and
# ``acot(x) = pi / 2 - atan(x)`` for nonnegative x.
# For detail and more identities, consult the reference below.
#
# This method is private and called internally as a helper of ``Tri.simplify``.
# For detailed description for simplification, refer to the comments of ``Tri.simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Inverse_trigonometric_functions
#
# :param x: Point where arccotangent function is to be computed.
# :type x: float
#
# :return: Computed value of arccotangent function.
# :rtype: float
# """
# return -(math.pi / 2 + math.atan(x)) if x < 0 else math.pi / 2 - math.atan(x)
#
# # @classmethod
# # def chk_t(cls, rt: Token.Fun) -> Optional[List[Type.Sign]]:
# # """
# # Type checker for trigonometric functions.
# # It checks type of input function token and assigns return type as type information of the token.
# #
# # :param rt: Token to be type checked.
# # :type rt: Token.Fun
# #
# # :return: None if type check is successful. Candidate signatures if not.
# # :rtype: Optional[List[Type.Signature]]
# # """
# # cand: List[Type.Sign] = cls.__sign.get(rt.v) # Candidate signatures
# # infer: Type.Sign = Type.Sign([tok.t for tok in rt.chd], Type.T.REAL, rt.v) # Inferred signature
# #
# # # Inferred signature must be one of candidates and return type is NUM type.
# # if infer in cand:
# # rt.t = Type.T.REAL
# #
# # return None
# # else:
# # return cand
#
# # TODO: No function coalescing. They are not safe.
# @classmethod
# def simplify(cls, rt: Token.Fun) -> Tuple[Token.Tok, List[Warning.InterpWarn]]:
# """
# Simplifier for trigonometric functions.
#
# It does following simplifications.
# 1. Constant folding.
# 2. Dead expression stripping.
# 3. Sign propagation.
# For details and detailed explanation of these optimization tricks, refer to the comments of
# ``Operator.simplify`` and references therein.
#
# :param rt: Root of AST to be simplified.
# :type rt: Token.Fun
#
# :return: Root of simplified AST and list of generated warnings.
# :rtype: Tuple[Token.Tok, List[Warning.InterpWarn]]
#
# :raise NAN_DETECT: If nan is detected as a given parameter.
# :raise IFN_DETECT: If inf is detected as a given parameter.
# :raise DOMAIN_OUT: If given parameter is not in domain.
# :raise POLE_DETECT: If mathematical pole is detected.
# :raise BIG_INT: If given parameter exceeds floating point max.
# :raise SMALL_INT: If given parameter exceeds floating point min.
# """
# warn: List[Warning.InterpWarn] = [] # List of generated warnings.
#
# if rt.v == Type.FunT.Sin:
# # Check for warnings.
# # Sine function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Sin'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Sin'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Sin'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Sin'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__sin``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__sin(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Sin[-x] = -Sin[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Cos:
# # Check for warnings.
# # Cosine function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Cos'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Cos'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Cos'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Cos'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__cos``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__cos(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Cos[-x] = Cos[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Tan:
# # Check for warnings.
# # Tangent function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is integer multiple of pi + pi/2. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Tan'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Tan'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Tan'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Tan'))
# elif (rt.chd[0].v - math.pi / 2) % math.pi == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 5))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__tan``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__tan(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Tan[-x] = -Tan[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Csc:
# # Check for warnings.
# # Cosecant function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is integer multiple of pi. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Csc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Csc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Csc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Csc'))
# elif rt.chd[0].v % math.pi == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 26))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__csc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__csc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Csc[-x] = -Csc[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Sec:
# # Check for warnings.
# # Secant function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is integer multiple of pi + pi/2. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Sec'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Sec'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Sec'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Sec'))
# elif (rt.chd[0].v - math.pi / 2) % math.pi == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 27))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__sec``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__sec(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Sec[-x] = Sec[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Cot:
# # Check for warnings.
# # Cotangent function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is integer multiple of pi. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Cot'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Cot'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Cot'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Cot'))
# elif rt.chd[0].v % math.pi == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 28))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__cot``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__cot(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Cot[-x] = -Cot[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcSin:
# # Check for warnings.
# # Arcsine function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is not in [-1, 1]. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Asin'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Asin'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Asin'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Asin'))
# elif not (-1 <= rt.chd[0].v <= 1):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 3))
#
# # Constant folding.
# # Arcsine function with parameter x has following rules.
# # For detailed computation rule, refer to the comment in ``Tri.__asin``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__asin(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Asin[-x] = -Asin[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcCos:
# # Check for warnings.
# # Arccosine function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is not in [-1, 1]. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Acos'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Acos'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Acos'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Acos'))
# elif not (-1 <= rt.chd[0].v <= 1):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 4))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__acos``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__acos(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcTan:
# # Check for warnings.
# # Arctangent function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Atan'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Atan'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Atan'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Atan'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__atan``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__atan(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Atan[-x] = -Atan[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcCsc:
# # Check for warnings.
# # Arccosecant function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is in (-1, 1). (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Acsc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Acsc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Acsc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Acsc'))
# elif -1 < rt.chd[0].v < 1:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 29))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__acsc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__acsc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Acsc[-x] = -Acsc[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcSec:
# # Check for warnings.
# # Arcsecant function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is in (-1, 1). (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Asec'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Asec'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Asec'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Asec'))
# elif -1 < rt.chd[0].v < 1:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 30))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__asec``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__asec(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# else:
# # Check for warnings.
# # Arccotangent function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Acot'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Acot'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Acot'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Acot'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``Tri.__acot``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__acot(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
#
# @classmethod
# def test(cls, fun: Type.FunT, test_in: List[List[Decimal]]) -> List[Decimal]:
# """
# Test function for trigonometric function.
#
# It just call corresponding target function and evaluate it at test input points.
#
# :param fun: Function to be tested.
# :type fun: Type.FunT
# :param test_in: Test input.
# :type test_in: List[List[Decimal]]
#
# :return: Test output.
# :rtype: List[Decimal]
# """
# if fun == Type.FunT.Sin:
# return list(map(lambda x: Decimal(cls.__sin(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Cos:
# return list(map(lambda x: Decimal(cls.__cos(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Tan:
# return list(map(lambda x: Decimal(cls.__tan(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Csc:
# return list(map(lambda x: Decimal(cls.__csc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Sec:
# return list(map(lambda x: Decimal(cls.__sec(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Cot:
# return list(map(lambda x: Decimal(cls.__cot(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.ArcSin:
# return list(map(lambda x: Decimal(cls.__asin(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.ArcCos:
# return list(map(lambda x: Decimal(cls.__acos(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.ArcTan:
# return list(map(lambda x: Decimal(cls.__atan(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.ArcCsc:
# return list(map(lambda x: Decimal(cls.__acsc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.ArcSec:
# return list(map(lambda x: Decimal(cls.__asec(*list(map(float, x)))), test_in))
# else:
# return list(map(lambda x: Decimal(cls.__acot(*list(map(float, x)))), test_in))
#
@final
class Sin(TriFun):
    """Marker class for the sine function ``Sin``; never instantiated."""

    # Declared call signatures for Sin (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Sin[Real] -> Real', 'Sin[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Cos(TriFun):
    """Marker class for the cosine function ``Cos``; never instantiated."""

    # Declared call signatures for Cos (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Cos[Real] -> Real', 'Cos[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Tan(TriFun):
    """Marker class for the tangent function ``Tan``; never instantiated."""

    # Declared call signatures for Tan (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Tan[Real] -> Real', 'Tan[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Csc(TriFun):
    """Marker class for the cosecant function ``Csc``; never instantiated."""

    # Declared call signatures for Csc (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Csc[Real] -> Real', 'Csc[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Sec(TriFun):
    """Marker class for the secant function ``Sec``; never instantiated."""

    # Declared call signatures for Sec (Real -> Real, Sym -> Sym).
    # BUGFIX: these previously read 'Csc[...]' — a copy-paste from the Csc
    # class above; every sibling class lists signatures under its own name.
    __SGN: Final[List[str]] = ['Sec[Real] -> Real', 'Sec[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Cot(TriFun):
    """Marker class for the cotangent function ``Cot``; never instantiated."""

    # Declared call signatures for Cot (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Cot[Real] -> Real', 'Cot[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class ArcSin(TriFun):
    """Marker class for the arcsine function ``ArcSin``; never instantiated."""

    # Declared call signatures for ArcSin (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['ArcSin[Real] -> Real', 'ArcSin[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class ArcCos(TriFun):
    """Marker class for the arccosine function ``ArcCos``; never instantiated."""

    # Declared call signatures for ArcCos (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['ArcCos[Real] -> Real', 'ArcCos[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class ArcTan(TriFun):
    """Marker class for the arctangent function ``ArcTan``; never instantiated."""

    # Declared call signatures for ArcTan (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['ArcTan[Real] -> Real', 'ArcTan[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class ArcCsc(TriFun):
    """Marker class for the arccosecant function ``ArcCsc``; never instantiated."""

    # Declared call signatures for ArcCsc (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['ArcCsc[Real] -> Real', 'ArcCsc[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class ArcSec(TriFun):
    """Marker class for the arcsecant function ``ArcSec``; never instantiated."""

    # Declared call signatures for ArcSec (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['ArcSec[Real] -> Real', 'ArcSec[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class ArcCot(TriFun):
    """Marker class for the arccotangent function ``ArcCot``; never instantiated."""

    # Declared call signatures for ArcCot (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['ArcCot[Real] -> Real', 'ArcCot[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Haversine(TriFun):
    """Marker class for the haversine function; never instantiated."""

    # Declared call signatures for Haversine (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Haversine[Real] -> Real', 'Haversine[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class InverseHaversine(TriFun):
    """Marker class for the inverse haversine function; never instantiated."""

    # Declared call signatures for InverseHaversine (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['InverseHaversine[Real] -> Real', 'InverseHaversine[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Sinc(TriFun):
    """Marker class for the sinc function; never instantiated."""

    # Declared call signatures for Sinc (Real -> Real, Sym -> Sym).
    __SGN: Final[List[str]] = ['Sinc[Real] -> Real', 'Sinc[Sym] -> Sym']

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,531 | eik4862/TinyCalculator | refs/heads/master | /Function/Link.py | from typing import final
from Function import Function
class LinkFun(Function.Fun):
    """Common base for link-function marker classes.

    Subclasses (``Logit``, ``Probit``, ...) are static markers; neither
    this base nor any subclass is ever instantiated.
    """

    def __new__(cls, *args, **kwargs) -> None:
        # Forbid instantiation of the toolbox base class itself.
        raise NotImplementedError
@final
class Logit(LinkFun):
    """Marker class for the logit link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Reciprocal(LinkFun):
    """Marker class for the reciprocal link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class InverseSquare(LinkFun):
    """Marker class for the inverse-square link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Probit(LinkFun):
    """Marker class for the probit link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class Cauchit(LinkFun):
    """Marker class for the cauchit link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class LogLog(LinkFun):
    """Marker class for the log-log link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class LogComplement(LinkFun):
    """Marker class for the log-complement link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class LogLogComplement(LinkFun):
    """Marker class for the log-log-complement link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class OddsPower(LinkFun):
    """Marker class for the odds-power link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class NegBinomLink(LinkFun):
    """Marker class for the negative-binomial link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
@final
class PowerLink(LinkFun):
    """Marker class for the power link function; never instantiated."""

    def __new__(cls, *args, **kwargs) -> None:
        # Static marker class: forbid instantiation outright.
        raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,532 | eik4862/TinyCalculator | refs/heads/master | /Command/Utility.py | # import datetime
# import math
# import time
# from typing import List, Dict, Optional, Tuple
#
# from Core import Type, Token, SystemManager, Error, Warning
# from Util import Printer
# from Util.Macro import is_bigint, is_smallint
#
#
# class Util:
# """
# Utility command toolbox.
#
# :cvar __sign: Signatures of utility commands.
# """
# # __sign: Dict[Type.CmdT, List[Type.Sign]] = {
# # Type.CmdT.QUIT: [Type.Sign([], Type.T.TER, Type.CmdT.QUIT)],
# # Type.CmdT.HELP: [Type.Sign([], Type.T.TER, Type.CmdT.HELP),
# # Type.Sign([Type.T.STR], Type.T.TER, Type.CmdT.HELP)],
# # Type.CmdT.GET_SYS_VAR: [Type.Sign([Type.T.STR], Type.T.STR, Type.CmdT.GET_SYS_VAR)],
# # Type.CmdT.SET_SYS_VAR: [Type.Sign([Type.T.STR, Type.T.NUM], Type.T.TER, Type.CmdT.SET_SYS_VAR),
# # Type.Sign([Type.T.STR, Type.T.STR], Type.T.TER, Type.CmdT.SET_SYS_VAR)],
# # Type.CmdT.SLEEP: [Type.Sign([Type.T.NUM], Type.T.TER, Type.CmdT.SLEEP)]
# # }
#
# def __init__(self) -> None:
# raise NotImplementedError
#
# def __del__(self) -> None:
# raise NotImplementedError
#
# @classmethod
# def chk_t(cls, rt: Token.CmdTok) -> Optional[List[Type.Sign]]:
# """
# Type checker for system commands.
# It checks type of input command token and assigns return type as type information of the token.
#
# :param rt: Token to be type checked.
# :type rt: Token.FunTok
#
# :return: None if type check is successful. Candidate signatures if not.
# :rtype: Optional[List[Type.Signature]]
# """
# cand: List[Type.Sign] = cls.__sign.get(rt.v) # Candidate signatures
# infer: Type.Sign = Type.Sign([tok.t for tok in rt.chd], cand[0].ret_t, rt.v) # Inferred signature
#
# # Inferred signature must be one of candidates and return type is TER type.
# if infer in cand:
# rt.t = infer.ret_t
#
# return None
# else:
# return cand
#
# @classmethod
# def eval(cls, rt: Token.CmdTok) -> Tuple[Token.Tok, List[Warning.UtilWarn]]:
# warn: List[Warning.UtilWarn] = []
# buf: Type.BufT = Type.BufT.INTERNAL
#
# if rt.v == Type.CmdT.QUIT:
# # Raise quit exception.
# # This will be handled by main procedure, terminating the whole process.
# raise Error.UtilErr(Type.UtilErrT.QUIT)
# elif rt.v == Type.CmdT.HELP:
# return Token.VoidTok(), warn
# elif rt.v == Type.CmdT.GET_SYS_VAR:
# sys_var: Type.SysVar = SystemManager.SysManager.inst().get_sys_var(rt.chd[0].v)
#
# # Check whether input is valid.
# # The system variable to get must exist.
# if not sys_var:
# raise Error.UtilErr(Type.UtilErrT.NOT_FOUND, 26, id=rt.chd[0].v)
#
# # Get.
# return Token.StrTok(str(sys_var.v)), warn
# elif rt.v == Type.CmdT.SET_SYS_VAR:
# prev_v: Type.SysVar = SystemManager.SysManager.inst().get_sys_var(rt.chd[0].v)
#
# # Check for errors.
# # Set_sys_var command for system variable x generates error for followings cases.
# # 1. x is not existing system variable. (NOT_FOUND)
# # 2. x is read only. (RD_ONLY)
# # 3. Type of system variable and that of x does not match. (T_MISMATCH)
# # The following logic is an implementation of these rules.
# if not prev_v:
# raise Error.UtilErr(Type.UtilErrT.NOT_FOUND, 26, id=rt.chd[0].v)
# elif prev_v.rd_only:
# raise Error.UtilErr(Type.UtilErrT.RD_ONLY, 28, id=rt.chd[0].v)
# elif prev_v.t != rt.chd[1].t:
# raise Error.UtilErr(Type.UtilErrT.T_MISMATCH, 30, id=rt.chd[0].v, corret_t=prev_v.t,
# wrong_t=rt.chd[1].t)
#
# if rt.chd[0].v in ["Computation_Timeout", "Input_Timeout"]:
# # Check for errors and warnings.
# # Set_sys_var command for timeout limits with second parameter x generates error or warning for
# # followings cases.
# # 1. x is +-inf. (INF_DETECT)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is finite negative. (DOMAIN_OUT)
# # 4. x is 0. (TURN_OFF)
# # 5. x is not in [0, 2147483647]. (DOMAIN_OUT)
# # 6. x is finite positive noninteger. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if is_bigint(rt.chd[1].v):
# warn.append(Warning.UtilWarn(Type.UtilWarnT.DOMAIN_OUT, 17))
# rt.chd[1].v = 2147483647
# elif is_smallint(rt.chd[1].v):
# raise Error.UtilErr(Type.UtilErrT.DOMAIN_OUT, 27)
# elif math.isinf(rt.chd[1].v):
# if rt.chd[1].v > 0:
# warn.append(Warning.UtilWarn(Type.UtilWarnT.INF_DETECT, 20))
# rt.chd[1].v = 0
# else:
# raise Error.UtilErr(Type.UtilErrT.INF_DETECT, 27)
# elif math.isnan(rt.chd[1].v):
# raise Error.UtilErr(Type.UtilErrT.NAN_DETECT, 27)
# elif rt.chd[1].v < 0:
# raise Error.UtilErr(Type.UtilErrT.DOMAIN_OUT, 27)
# elif rt.chd[1].v == 0:
# warn.append(Warning.UtilWarn(Type.UtilWarnT.TURN_OFF, 14))
# elif rt.chd[1].v % 1 != 0:
# warn.append(Warning.UtilWarn(Type.UtilWarnT.DOMAIN_OUT, 13))
# rt.chd[1].v = max(round(rt.chd[1].v), 1)
#
# # Set.
# SystemManager.SysManager.inst().set_sys_var(rt.chd[0].v, rt.chd[1].v)
#
# # Report.
# Printer.Printer.inst().buf(Printer.Printer.inst().f_title('system report'), buf)
# Printer.Printer.inst().buf(f'Updated system variable \"{rt.chd[0].v}\".', buf, indent=2)
# Printer.Printer.inst().buf(f'@target: {rt.chd[0].v}', buf, indent=4)
#
# if prev_v.v == 0:
# Printer.Printer.inst().buf('@from : 0 (Turn off)', buf, indent=4)
# else:
# Printer.Printer.inst().buf(f'@from : {prev_v.v}', buf, indent=4)
#
# if rt.chd[1].v == 0:
# Printer.Printer.inst().buf(f'@to : 0 (Turn off)', buf, indent=4)
# else:
# Printer.Printer.inst().buf(f'@to : {rt.chd[1].v}', buf, indent=4)
#
# Printer.Printer.inst().buf_newline(buf)
#
# return Token.VoidTok(), warn
# else:
# # Check for errors.
# # Set_sys_var command for system variable x generates error for followings cases.
# # 1. x is +-inf. (INF_DETECT)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is not in [0, 100000000.99999]. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if is_bigint(rt.chd[0].v) or is_smallint(rt.chd[0].v):
# raise Error.UtilErr(Type.UtilErrT.DOMAIN_OUT, 29)
# if math.isinf(rt.chd[0].v):
# raise Error.UtilErr(Type.UtilErrT.INF_DETECT, 29)
# elif math.isnan(rt.chd[0].v):
# raise Error.UtilErr(Type.UtilErrT.NAN_DETECT, 29)
# elif not (0 <= rt.chd[0].v <= 100000000.99999):
# raise Error.UtilErr(Type.UtilErrT.DOMAIN_OUT, 29)
#
# # Sleep.
# start: time = datetime.datetime.now()
# time.sleep(rt.chd[0].v)
# end: time = datetime.datetime.now()
#
# # Report.
# Printer.Printer.inst().buf(Printer.Printer.inst().f_title('system report'), buf)
# Printer.Printer.inst().buf(f'Tiny calculator slept for {rt.chd[0].v} seconds.', buf, indent=2)
# Printer.Printer.inst().buf(f'@start: {start}', buf, indent=4)
# Printer.Printer.inst().buf(f'@end : {end}', buf, indent=4)
# Printer.Printer.inst().buf_newline(buf)
#
# return Token.VoidTok(), warn
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,533 | eik4862/TinyCalculator | refs/heads/master | /Function/Hyperbolic.py | from typing import final
from Function import Function
class HypbolicFun(Function.Fun):
    """
    Hyperbolic trigonometric function toolbox.

    Acts as a non-instantiable namespace/base for the hyperbolic function
    marker classes below (``Sinh``, ``Cosh``, ...).

    NOTE(review): the ``__sign`` signature table and the ``chk_t``/``simplify``/
    ``test`` machinery referenced by the historical docstring are commented out
    in this revision; they are not part of the live class.
    """

    def __new__(cls, *args, **kwargs) -> None:
        """
        Forbid instantiation.

        Accepts (and ignores) arbitrary arguments so the signature matches the
        subclasses' ``__new__(cls, *args, **kwargs)`` and any construction
        attempt raises ``NotImplementedError`` rather than ``TypeError``.

        :raise NotImplementedError: Always.
        """
        raise NotImplementedError
#
# @classmethod
# def __sinh(cls, x: float) -> float:
# """
# Sine hyperbolic function.
#
# Sine hyperbolic function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is +-inf, resp.
# 3. If x is finite, the result is ``math.sinh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where sine hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of sine hyperbolic function.
# :rtype: float
# """
# try:
# return math.sinh(x)
# except OverflowError:
# return math.inf if x > 0 else -math.inf
#
# @classmethod
# def __cosh(cls, x: float) -> float:
# """
# Cosine hyperbolic function.
#
# Cosine hyperbolic function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is inf.
# 3. If x is finite, the result is ``math.cosh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where cosine hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of cosine hyperbolic function.
# :rtype: float
# """
# try:
# return math.cosh(x)
# except OverflowError:
# return math.inf
#
# @classmethod
# def __tanh(cls, x: float) -> float:
# """
# Tangent hyperbolic function.
#
# Tangent hyperbolic function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
#     2. If x is +-inf, the result is +-1, resp.
#     3. If x is finite, the result is ``math.tanh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where tangent hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of tangent hyperbolic function.
# :rtype: float
# """
# return math.tanh(x)
#
# @classmethod
# def __csch(cls, x: float) -> float:
# """
# Cosecant hyperbolic function.
#
# Cosecant hyperbolic function with parameter x has following computation rules.
# 1. If x is nan or 0, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is finite, the result is ``1 / math.sinh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where cosecant hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of cosecant hyperbolic function.
# :rtype: float
# """
# try:
# return math.nan if x == 0 else 1 / math.sinh(x)
# except OverflowError:
# return 0
#
# @classmethod
# def __sech(cls, x: float) -> float:
# """
# Secant hyperbolic function.
#
# Secant hyperbolic function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is finite, the result is ``1 / math.cosh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where secant hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of secant hyperbolic function.
# :rtype: float
# """
# try:
# return 1 / math.cosh(x)
# except OverflowError:
# return 0
#
# @classmethod
# def __coth(cls, x: float) -> float:
# """
# Cotangent hyperbolic function.
#
# Cotangent hyperbolic function with parameter x has following computation rules.
# 1. If x is nan or 0, the result is nan.
# 2. If x is +-inf, the result is +-1, resp.
# 3. If x is finite, the result is ``1 / math.tanh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where cotangent hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of cotangent hyperbolic function.
# :rtype: float
# """
# return math.nan if x == 0 else 1 / math.tanh(x)
#
# @classmethod
# def __asinh(cls, x: float) -> float:
# """
# Arcsine hyperbolic function.
#
# Arcsine hyperbolic function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is +-inf, resp.
# 3. If x is finite, the result is ``math.asinh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where arcsine hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of arcsine hyperbolic function.
# :rtype: float
# """
# return math.asinh(x)
#
# @classmethod
# def __acosh(cls, x: float) -> float:
# """
# Arccosine hyperbolic function.
#
# Arccosine hyperbolic function with parameter x has following computation rules.
# 1. If x is +inf, the result is inf.
# 2. If x is -inf or nan, the result is nan
# 3. If x is finite which is not in [1, inf), the result is nan.
# 4. If x is finite which is in [1, inf), the result is ``math.acosh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where arccosine hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of arccosine hyperbolic function.
# :rtype: float
# """
# return math.nan if x < 1 else math.acosh(x)
#
# @classmethod
# def __atanh(cls, x: float) -> float:
# """
# Arctangent hyperbolic function.
#
# Arctangent hyperbolic function with parameter x has following computation rules.
# 1. If x is +-inf or nan, the result is nan.
# 2. If x is finite which is not in (-1, 1), the result is nan.
# 3. If x is +-1, then the result is +-inf.
# 4. If x is finite which is in (-1, 1), the result is ``math.atanh(x)``.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# :param x: Point where arctangent hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of arctangent hyperbolic function.
# :rtype: float
# """
# return math.nan if x < -1 or x > 1 else math.inf if x == 1 else -math.inf if x == -1 else math.atanh(x)
#
# @classmethod
# def __acsch(cls, x: float) -> float:
# """
# Arccosecant hyperbolic function.
#
# Arccosecant hyperbolic function with parameter x has following computation rules.
# 1. If x is nan or 0, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is finite, the result is ``math.asinh(1 / x)``.
# Here, the rule 3 is based on identity ``acsch(x) = asinh(1 / x)``.
# For detail and more identities, consult the reference below.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions
#
# :param x: Point where arccosecant hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of arccosecant hyperbolic function.
# :rtype: float
# """
# return math.nan if x == 0 else math.asinh(1 / x)
#
# @classmethod
# def __asech(cls, x: float) -> float:
# """
# Arcsecant hyperbolic function.
#
# Arcsecant hyperbolic function with parameter x has following computation rules.
# 1. If x is nan or +-inf, the result is nan.
# 2. If x is 0, the result is inf.
# 3. If x is finite not in (0, 1], the result is nan.
# 4. If x is in (0, 1], the result is ``math.acosh(1 / x)``.
# Here, the rule 4 is based on identity ``asech(x) = acosh(1 / x)``.
# For detail and more identities, consult the reference below.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions
#
# :param x: Point where arcsecant hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of arcsecant hyperbolic function.
# :rtype: float
# """
# return math.nan if x < 0 or x > 1 else math.inf if x == 0 else math.acosh(1 / x)
#
# @classmethod
# def __acoth(cls, x: float) -> float:
# """
# Arccotangent hyperbolic function.
#
# Arccotangent hyperbolic function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is +-1, the result is +-inf.
# 4. If x is in (-1, 1), the result is nan.
# 5. If x is finite not in [-1, 1], the result is ``math.atanh(1 / x)``.
# Here, the rule 5 is based on identity ``acoth(x) = atanh(1 / x)``.
# For detail and more identities, consult the reference below.
#
# This method is private and called internally as a helper of ``HyperTri.simplify``.
# For detailed description for simplification, refer to the comments of ``HyperTri.simplify``.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions
#
# :param x: Point where arccotangent hyperbolic function is to be computed.
# :type x: float
#
# :return: Computed value of arccotangent hyperbolic function.
# :rtype: float
# """
# return math.nan if -1 < x < 1 else math.inf if x == 1 else -math.inf if x == -1 else math.atanh(1 / x)
#
# @classmethod
# def chk_t(cls, rt: Token.Fun) -> Optional[List[Type.Sign]]:
# """
# Type checker for hyperbolic trigonometric functions.
# It checks type of input function token and assigns return type as type information of the token.
#
# :param rt: Token to be type checked.
# :type rt: Token.Fun
#
# :return: None if type check is successful. Candidate signatures if not.
# :rtype: Optional[List[Type.Signature]]
# """
# cand: List[Type.Sign] = cls.__sign.get(rt.v) # Candidate signatures
# infer: Type.Sign = Type.Sign([tok.t for tok in rt.chd], Type.T.REAL, rt.v) # Inferred signature
#
# # Inferred signature must be one of candidates and return type is NUM type.
# if infer in cand:
# rt.t = Type.T.REAL
#
# return None
# else:
# return cand
#
# @classmethod
# def simplify(cls, rt: Token.Fun) -> Tuple[Token.Tok, List[Warning.InterpWarn]]:
# """
# Simplifier for hyperbolic trigonometric functions.
#
# It does following simplifications.
# 1. Constant folding.
# 2. Dead expression stripping.
# 3. Sign propagation.
# For details and detailed explanation of these optimization tricks, refer to the comments of
# ``Operator.simplify`` and references therein.
#
# :param rt: Root of AST to be simplified.
# :type rt: Token.Fun
#
# :return: Root of simplified AST and list of generated warnings.
# :rtype: Tuple[Token.Tok, List[Warning.InterpWarn]]
#
# :raise NAN_DETECT: If nan is detected as a given parameter.
# :raise IFN_DETECT: If inf is detected as a given parameter.
# :raise DOMAIN_OUT: If given parameter is not in domain.
# :raise POLE_DETECT: If mathematical pole is detected.
# :raise BIG_INT: If given parameter exceeds floating point max.
# :raise SMALL_INT: If given parameter exceeds floating point min.
# """
# warn: List[Warning.InterpWarn] = [] # List of generated warnings.
#
# if rt.v == Type.FunT.Sinh:
# # Check for warnings.
# # Sine hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Sinh"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Sinh"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Sinh"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Sinh"))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__sinh``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__sinh(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Sinh[-x] = -Sinh[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Cosh:
# # Check for warnings.
# # Cosine hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Cosh"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Cosh"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Cosh"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Cosh"))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__cosh``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__cosh(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Cosh[-x] = Cosh[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Tanh:
# # Check for warnings.
# # Tangent hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Tanh"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Tanh"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Tanh"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Tanh"))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__tanh``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__tanh(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Tanh[-x] = -Tanh[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Csch:
# # Check for warnings.
# # Cosecant hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is 0. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Csch"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Csch"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Csch"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Csch"))
# elif rt.chd[0].v == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 33))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__csch``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__csch(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Csch[-x] = -Csch[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Sech:
# # Check for warnings.
# # Secant hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Sech"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Sech"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Sech"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Sech"))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__sech``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__sech(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Sech[-x] = Sech[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Coth:
# # Check for warnings.
# # Cotangent hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is 0. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Coth"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Coth"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Coth"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Coth"))
# elif rt.chd[0].v == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 34))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__coth``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__coth(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Coth[-x] = -Coth[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcSinh:
# # Check for warnings.
# # Arcsine hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Asinh"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Asinh"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Asinh"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Asinh"))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__asinh``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__asinh(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Asinh[-x] = -Asinh[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcCosh:
# # Check for warnings.
# # Arccosine hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is not in [1, Inf). (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Acosh"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Acosh"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Acosh"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Acosh"))
# elif rt.chd[0].v < 1:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 21))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__acosh``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__acosh(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcTanh:
# # Check for warnings.
# # Arctangent hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
#             # 3. x is +-inf. (INF_DETECT)
# # 4. x is +-1. (POLE_DETECT)
# # 5. x is not in (-1, 1). (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Atanh"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Atanh"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Atanh"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Atanh"))
# elif abs(rt.chd[0].v) == 1:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 32))
# elif not (-1 < rt.chd[0].v < 1):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 22))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__atanh``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__atanh(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Atanh[-x] = -Atanh[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcCsch:
# # Check for warnings.
# # Arccosecant hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is 0. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Acsch"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Acsch"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Acsch"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Acsch"))
# elif rt.chd[0].v == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 35))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__acsch``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__acsch(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Acsch[-x] = -Acsch[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.ArcSech:
# # Check for warnings.
# # Arcsecant hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is 0. (POLE_DETECT)
# # 5. x is not in (0, 1]. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Asech"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Asech"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Asech"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Asech"))
# elif rt.chd[0].v == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 37))
# elif not (0 < rt.chd[0].v <= 1):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 36))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__asech``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__asech(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# else:
# # Check for warnings.
# # Arccotangent hyperbolic function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is +-1. (POLE_DETECT)
# # 5. x is in (-1, 1). (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle="Acoth"))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle="Acoth"))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle="Acoth"))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle="Acoth"))
# elif abs(rt.chd[0].v) == 1:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 39))
# elif -1 < rt.chd[0].v < 1:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 38))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``HyperTri.__acoth``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__acoth(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Acoth[-x] = -Acoth[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
#
# @classmethod
# def test(cls, fun: Type.FunT, test_in: List[List[Decimal]]) -> List[Decimal]:
# """
# Test function for hyperbolic trigonometric function.
#
# It just call corresponding target function and evaluate it at test input points.
#
# :param fun: Function to be tested.
# :type fun: Type.FunT
# :param test_in: Test input.
# :type test_in: List[List[Decimal]]
#
# :return: Test output.
# :rtype: List[Decimal]
# """
# if fun == Type.FunT.Sinh:
# return list(map(lambda x: Decimal(cls.__sinh(float(*x))), test_in))
# elif fun == Type.FunT.Cosh:
# return list(map(lambda x: Decimal(cls.__cosh(float(*x))), test_in))
# elif fun == Type.FunT.Tanh:
# return list(map(lambda x: Decimal(cls.__tanh(float(*x))), test_in))
# elif fun == Type.FunT.Csch:
# return list(map(lambda x: Decimal(cls.__csch(float(*x))), test_in))
# elif fun == Type.FunT.Sech:
# return list(map(lambda x: Decimal(cls.__sech(float(*x))), test_in))
# elif fun == Type.FunT.Coth:
# return list(map(lambda x: Decimal(cls.__coth(float(*x))), test_in))
# elif fun == Type.FunT.ArcSinh:
# return list(map(lambda x: Decimal(cls.__asinh(float(*x))), test_in))
# elif fun == Type.FunT.ArcCosh:
# return list(map(lambda x: Decimal(cls.__acosh(float(*x))), test_in))
# elif fun == Type.FunT.ArcTanh:
# return list(map(lambda x: Decimal(cls.__atanh(float(*x))), test_in))
# elif fun == Type.FunT.ArcCsch:
# return list(map(lambda x: Decimal(cls.__acsch(float(*x))), test_in))
# elif fun == Type.FunT.ArcSech:
# return list(map(lambda x: Decimal(cls.__asech(float(*x))), test_in))
# else:
# return list(map(lambda x: Decimal(cls.__acoth(float(*x))), test_in))
@final
class Sinh(HypbolicFun):
    """Non-instantiable marker class for the hyperbolic sine handle."""

    def __new__(cls, *args, **kwargs) -> None:
        # Construction is forbidden; the class exists only as a named handle.
        raise NotImplementedError
@final
class Cosh(HypbolicFun):
    """Non-instantiable marker class for the hyperbolic cosine handle."""

    def __new__(cls, *args, **kwargs) -> None:
        # Construction is forbidden; the class exists only as a named handle.
        raise NotImplementedError
@final
class Tanh(HypbolicFun):
    """Non-instantiable marker class for the hyperbolic tangent handle."""

    def __new__(cls, *args, **kwargs) -> None:
        # Construction is forbidden; the class exists only as a named handle.
        raise NotImplementedError
@final
class Csch(HypbolicFun):
    """Non-instantiable marker class for the hyperbolic cosecant handle."""

    def __new__(cls, *args, **kwargs) -> None:
        # Construction is forbidden; the class exists only as a named handle.
        raise NotImplementedError
@final
class Sech(HypbolicFun):
    """Non-instantiable marker class for the hyperbolic secant handle."""

    def __new__(cls, *args, **kwargs) -> None:
        # Construction is forbidden; the class exists only as a named handle.
        raise NotImplementedError
@final
class Coth(HypbolicFun):
    """Non-instantiable marker class for the hyperbolic cotangent handle."""

    def __new__(cls, *args, **kwargs) -> None:
        # Construction is forbidden; the class exists only as a named handle.
        raise NotImplementedError
@final
class ArcSinh(HypbolicFun):
    """Marker class for the inverse hyperbolic sine function ``asinh``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class ArcCosh(HypbolicFun):
    """Marker class for the inverse hyperbolic cosine function ``acosh``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class ArcTanh(HypbolicFun):
    """Marker class for the inverse hyperbolic tangent function ``atanh``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class ArcCsch(HypbolicFun):
    """Marker class for the inverse hyperbolic cosecant function ``acsch``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class ArcSech(HypbolicFun):
    """Marker class for the inverse hyperbolic secant function ``asech``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class ArcCoth(HypbolicFun):
    """Marker class for the inverse hyperbolic cotangent function ``acoth``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class Gudermannian(HypbolicFun):
    """Marker class for the Gudermannian function ``gd``; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
@final
class InverseGudermannian(HypbolicFun):
    """Marker class for the inverse Gudermannian function; never instantiated."""
    def __new__(cls, *args, **kwargs) -> None:
        # Function classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,534 | eik4862/TinyCalculator | refs/heads/master | /Operator/Assign.py | from __future__ import annotations
from typing import final, Final, Tuple, Dict, Optional, List
from Core import Token, TypeSystem
from Operator import Operator
class AsgnOp(Operator.Op):
    """
    Common base for assignment operator tokens.

    :cvar __ARGC: Operand count shared by every assignment operator (LHS and RHS).
    """
    __ARGC: Final[int] = 2
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def argc(cls) -> int:
        """Return the number of operands (always 2)."""
        return cls.__ARGC
@final
class Asgn(AsgnOp):
    """
    Plain assignment operator token (``=``).

    :cvar __PRECD: Precedence pair — presumably (in-stack, incoming) for the
                   shunting-yard parser; TODO confirm against Parser usage.
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Human-readable type signatures accepted by this operator.
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '='
    __SGN: Final[List[str]] = ['Sym = Real -> Real',
                               'Sym = Cmplx -> Cmplx',
                               'Sym = Str -> Str',
                               'Sym = Bool -> Bool',
                               'Sym = Sym -> Sym',
                               'Sym = List of Real (n fold) -> List of Real (n fold)',
                               'Sym = List of Cmplx (n fold) -> List of Cmplx (n fold)',
                               'Sym = List of Str (n fold) -> List of Str (n fold)',
                               'Sym = List of Bool (n fold) -> List of Bool (n fold)']
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """
        Type-check an assignment node.

        Succeeds only when the LHS is a base type that is exactly ``TypeSystem.Sym``
        and the RHS type is not ``TypeSystem.Void``; on success the node's type is
        set to the RHS type and the (unchanged) type environment is returned.

        :param rt: Root token of the assignment subtree (two children: LHS, RHS).
        :param t_env: Type environment mapping.
        :return: The type environment on success, None on a type error.
        """
        t1: TypeSystem.T = rt.chd[0].t  # LHS (assignment target) type.
        t2: TypeSystem.T = rt.chd[1].t  # RHS (assigned value) type.
        if t1.base:
            # NOTE(review): exact-type comparison (`type(x) == T`), not isinstance —
            # presumably deliberate to exclude subclasses; confirm before changing.
            if not (type(t1) == TypeSystem.Sym) or type(t2) == TypeSystem.Void:
                return None
            else:
                rt.t = t2
        else:
            return None
        return t_env
@final
class AddAsgn(AsgnOp):
    """
    Addition assignment operator token (``+=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '+='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_AddAsgn__SGN`
    # and was never defined, so every call raised AttributeError. Declared empty
    # here until the real signatures are written.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'+='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class SubAsgn(AsgnOp):
    """
    Subtraction assignment operator token (``-=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '-='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_SubAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'-='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class MulAsgn(AsgnOp):
    """
    Multiplication assignment operator token (``*=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '*='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_MulAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'*='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class MatMulAsgn(AsgnOp):
    """
    Matrix multiplication assignment operator token (``%*%=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '%*%='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_MatMulAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'%*%='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class DivAsgn(AsgnOp):
    """
    Division assignment operator token (``/=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '/='
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    # Consistency fix: every sibling assignment operator exposes these accessors;
    # DivAsgn alone lacked them, leaving its precedence/symbol class vars unreachable.
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'/='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class RemAsgn(AsgnOp):
    """
    Remainder assignment operator token (``%=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '%='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_RemAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'%='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class QuotAsgn(AsgnOp):
    """
    Quotient (floor division) assignment operator token (``//=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '//='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_QuotAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'//='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class PowAsgn(AsgnOp):
    """
    Power assignment operator token (``**=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '**='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_PowAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'**='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class AndAsgn(AsgnOp):
    """
    AND assignment operator token (``&=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '&='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_AndAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'&='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class OrAsgn(AsgnOp):
    """
    OR assignment operator token (``|=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '|='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_OrAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'|='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
@final
class XorAsgn(AsgnOp):
    """
    XOR assignment operator token (``^=``).

    :cvar __PRECD: Precedence pair (in-stack, incoming).
    :cvar __SYM: Operator symbol.
    :cvar __SGN: Type signatures. TODO: fill in (see ``Asgn.__SGN`` for the format).
    """
    __PRECD: Final[Tuple[int, int]] = (1, 2)
    __SYM: Final[str] = '^='
    # Bug fix: `sgn` referenced `cls.__SGN`, which name-mangles to `_XorAsgn__SGN`
    # and was never defined, so every call raised AttributeError.
    __SGN: Final[List[str]] = []
    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are static namespaces; instantiation is forbidden.
        raise NotImplementedError
    @classmethod
    def precd_in(cls) -> int:
        """Return the first component of the precedence pair."""
        return cls.__PRECD[0]
    @classmethod
    def precd_out(cls) -> int:
        """Return the second component of the precedence pair."""
        return cls.__PRECD[1]
    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol ``'^='``."""
        return cls.__SYM
    @classmethod
    def sgn(cls) -> List[str]:
        """Return the list of admissible type signatures."""
        return cls.__SGN
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,535 | eik4862/TinyCalculator | refs/heads/master | /Warning/Warning.py | from typing import final, Dict, Any
from Core import Type
class Warn:
    """
    Base class for warnings raised by the calculator core.

    Holds the warning code and exposes it through the read-only ``warnno`` property.
    """

    def __init__(self, warnno: int) -> None:
        # Kept private; handlers and subclasses read the code via the property below.
        self.__code: int = warnno

    @property
    def warnno(self) -> int:
        """Warning code identifying this warning."""
        return self.__code
class ParserWarn(Warn):
    """Warning raised during parsing; distinguished from other warnings by type only."""
    def __init__(self, warnno: int) -> None:
        # Delegates straight to Warn; exists only to pin this subclass's signature.
        super().__init__(warnno)
@final
class InterpWarn(Warn):
    """
    Interpreter warning class.

    :ivar __warn_t: Warning type.
    :ivar __warn_no: Warning code.
    :ivar __extra_info: Extra information (optional keys seen below: ``handle``, ``arg_pos``).
    """
    def __init__(self, warn_t: Type.InterpWarnT, warn_no: int, **kwargs: Any) -> None:
        # Bug fix: `Warn.__init__` takes the warning code as a mandatory argument, so
        # the original bare `super().__init__()` raised TypeError on every construction.
        super().__init__(warn_no)
        self.__warn_t: Type.InterpWarnT = warn_t
        self.__warn_no: int = warn_no
        self.__extra_info: Dict[str, Any] = kwargs
    def __del__(self) -> None:
        pass
    @property
    def warn_t(self) -> Type.InterpWarnT:
        """
        Getter for interpreter warning type.
        :return: Interpreter warning type.
        :rtype: Type.InterpWarnT
        """
        return self.__warn_t
    @property
    def warn_no(self) -> int:
        """
        Getter for warning code.
        :return: Warning code.
        :rtype: int
        """
        return self.__warn_no
    @property
    def handle(self) -> str:
        """
        Getter for erroneous function handle.
        :return: Erroneous function handle (None when not supplied at construction).
        :rtype: str
        """
        return self.__extra_info.get('handle')
    @property
    def arg_pos(self) -> int:
        """
        Getter for the position of erroneous operand.
        :return: Erroneous operand position (None when not supplied at construction).
        :rtype: int
        """
        return self.__extra_info.get('arg_pos')
@final
class UtilWarn(Warn):
    """
    Utility command warning type.

    :ivar __warn_t: Warning type.
    :ivar __warn_no: Warning code.
    """
    def __init__(self, warn_t: Type.UtilWarnT, warn_no: int) -> None:
        # Bug fix: `Warn.__init__` takes the warning code as a mandatory argument, so
        # the original bare `super().__init__()` raised TypeError on every construction.
        super().__init__(warn_no)
        self.__warn_t: Type.UtilWarnT = warn_t
        self.__warn_no: int = warn_no
    def __del__(self) -> None:
        pass
    @property
    def warn_t(self) -> Type.UtilWarnT:
        """
        Getter for utility command warning type.
        :return: Utility command warning type.
        :rtype: Type.UtilWarnT
        """
        return self.__warn_t
    @property
    def warn_no(self) -> int:
        """
        Getter for warning code.
        :return: Warning code.
        :rtype: int
        """
        return self.__warn_no
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,536 | eik4862/TinyCalculator | refs/heads/master | /Util/Printer.py | import sys
from typing import final, TextIO, Final, List, Union
from Core import Type
# TODO: floating point printing mode
# TODO: Long int ... printing
@final
class Printer:
    """
    Buffer strings and print out them to target file.
    This class is implemented as singleton.
    For the concept of singleton pattern, consult the references below.
    **Reference**
    * https://en.wikipedia.org/wiki/Singleton_pattern
    :cvar __TITLE_LEN: Length of title.
    :cvar __PROG_LEN: Length of progress bar.
    :cvar __TB_SEP: Default column separation (in spaces) for tables.
    :cvar __TITLE_FILL: Placeholder in title.
    :cvar __PROG_FILL: Placeholder in progress bar.
    :cvar __HLINE_FILL: Marker in horizontal line of table.
    :cvar __BLUE_TEMP: Blue bold style template.
    :cvar __RED_TEMP: Red bold style template.
    :cvar __inst: Singleton object.
    :ivar __stdout: Buffer for std output.
    :ivar __stderr: Buffer for std error.
    :ivar __stdwarn: Buffer for std warning.
    :ivar __internal: Buffer for internal output.
    :ivar __debug: Buffer for debugging.
    """
    __TITLE_LEN: Final[int] = 44
    __PROG_LEN: Final[int] = 37
    __TB_SEP: Final[int] = 2
    __TITLE_FILL: Final[str] = '-'
    __PROG_FILL: Final[str] = '.'
    __HLINE_FILL: Final[str] = '-'
    # ANSI escape templates; '$1' is substituted with the payload in `f_col`.
    __BLUE_TEMP: Final[str] = '\033[1;36m$1\033[0m'
    __RED_TEMP: Final[str] = '\033[1;31m$1\033[0m'
    __inst = None
    def __init__(self) -> None:
        # Five independent string buffers, one per Type.BufT destination.
        self.__stdout: str = ''
        self.__stderr: str = ''
        self.__stdwarn: str = ''
        self.__internal: str = ''
        self.__debug: str = ''
    def __del__(self) -> None:
        pass
    @classmethod
    def inst(cls):
        """
        Getter for singleton object.
        If it is the first time calling this, it initializes the singleton objects.
        This automatically supports so called lazy initialization.
        :return: Singleton object.
        :rtype: Printer
        """
        if not cls.__inst:
            cls.__inst = Printer()
        return cls.__inst
    def buf(self, string: str, buf: Type.BufT = Type.BufT.STDOUT, newline: bool = True, indent: int = 0) -> None:
        """
        Buffer input string to target buffer.
        :param string: String to be buffered.
        :type string: str
        :param buf: Buffer where input content will be buffered. (Default: Type.PrinterBuf.STDOUT)
        :type buf: Type.BufT
        :param newline: Flag for newline character at the end of string to be buffered. (Default: True)
        :type newline: bool
        :param indent: The # of leading indentation (white space). (Default: 0)
        :type indent: int
        """
        # The conditional expression spans the whole concatenation: either the
        # newline-terminated indented string or the bare indented string is appended.
        if buf == Type.BufT.STDOUT:
            self.__stdout += ' ' * indent + string + '\n' if newline else ' ' * indent + string
        elif buf == Type.BufT.STDERR:
            self.__stderr += ' ' * indent + string + '\n' if newline else ' ' * indent + string
        elif buf == Type.BufT.INTERNAL:
            self.__internal += ' ' * indent + string + '\n' if newline else ' ' * indent + string
        elif buf == Type.BufT.DEBUG:
            self.__debug += ' ' * indent + string + '\n' if newline else ' ' * indent + string
        else:
            # Any other buffer type falls through to the warning buffer.
            self.__stdwarn += ' ' * indent + string + '\n' if newline else ' ' * indent + string
    def buf_newline(self, buf: Type.BufT = Type.BufT.STDOUT) -> None:
        """
        Buffer newline character to target buffer.
        :param buf: Buffer where newline character will be buffered. (Default: Type.PrinterBuf.STDOUT)
        :type buf: Type.BufT
        """
        self.buf('', buf, True)
    def pop(self, buf: Type.BufT = Type.BufT.STDOUT) -> None:
        """
        Pop the last character from target buffer.
        :param buf: Buffer to be popped. (Default: Type.PrinterBuf.STDOUT)
        :type buf: Type.BufT
        """
        if buf == Type.BufT.STDOUT:
            self.__stdout = self.__stdout[:-1]
        elif buf == Type.BufT.STDERR:
            self.__stderr = self.__stderr[:-1]
        elif buf == Type.BufT.INTERNAL:
            self.__internal = self.__internal[:-1]
        elif buf == Type.BufT.STDWARN:
            self.__stdwarn = self.__stdwarn[:-1]
        else:
            # Any other buffer type falls through to the debug buffer.
            self.__debug = self.__debug[:-1]
    def print(self, buf: Type.BufT = Type.BufT.STDOUT, to: TextIO = sys.stdout) -> None:
        """
        Print buffered string in target buffer to target file.
        After printing, the buffer will be cleared.
        :param buf: Buffer whose content is to be printed out. (Default: Type.PrinterBuf.STDOUT)
        :type buf: Type.BufT
        :param to: File where content will be written. (Default: sys.stdout)
        :type to: TextIO
        """
        if buf == Type.BufT.STDOUT:
            print(self.__stdout, end='', file=to)
            self.__stdout = ''
        elif buf == Type.BufT.STDERR:
            print(self.__stderr, end='', file=to)
            self.__stderr = ''
        elif buf == Type.BufT.INTERNAL:
            print(self.__internal, end='', file=to)
            self.__internal = ''
        elif buf == Type.BufT.STDWARN:
            print(self.__stdwarn, end='', file=to)
            self.__stdwarn = ''
        else:
            print(self.__debug, end='', file=to)
            self.__debug = ''
    def sprint(self, buf: Type.BufT = Type.BufT.STDOUT) -> str:
        """
        Print buffered string in target buffer as a string.
        After printing, the buffer will be cleared.
        :param buf: Buffer whose content is to be printed out. (Default: Type.PrinterBuf.STDOUT)
        :type buf: Type.BufT
        :return: Buffered content.
        :rtype: str
        """
        if buf == Type.BufT.STDOUT:
            content: str = self.__stdout
            self.__stdout = ''
            return content
        elif buf == Type.BufT.STDERR:
            content: str = self.__stderr
            self.__stderr = ''
            return content
        elif buf == Type.BufT.INTERNAL:
            content: str = self.__internal
            self.__internal = ''
            return content
        elif buf == Type.BufT.STDWARN:
            content: str = self.__stdwarn
            self.__stdwarn = ''
            return content
        else:
            content: str = self.__debug
            self.__debug = ''
            return content
    def clr(self, buf: Type.BufT = Type.BufT.STDOUT) -> None:
        """
        Clear target buffer without printing out.
        :param buf: Buffer to be cleared.
        :type buf: Type.BufT
        """
        if buf == Type.BufT.STDOUT:
            self.__stdout = ''
        elif buf == Type.BufT.STDERR:
            self.__stderr = ''
        elif buf == Type.BufT.INTERNAL:
            self.__internal = ''
        elif buf == Type.BufT.STDWARN:
            self.__stdwarn = ''
        else:
            self.__debug = ''
    def f_title(self, string: str, length: int = None) -> str:
        """
        Format string as a title of specific length.
        Title is aligned to center using title placeholder character ``Printer.__TITLE_FILL``.
        The length of title is optional and will be default length ``Printer.__TITLE_LEN`` if not given.
        Strings already at or beyond the target length are returned upper-cased, unpadded.
        :param string: String to be formatted.
        :type string: str
        :param length: Length of title. (Default: None)
        :type length: int
        :return: Formatted string.
        :rtype: str
        """
        length = self.__TITLE_LEN if not length else length
        if len(string) >= length:
            return string.upper()
        # Two columns are reserved for the spaces flanking the title text.
        head_cnt: int = round((length - len(string) - 2) / 2)
        tail_cnt: int = length - len(string) - head_cnt - 2
        return f'{self.__TITLE_FILL * head_cnt} {string.upper()} {self.__TITLE_FILL * tail_cnt}'
    def f_prog(self, string: str, length: int = None) -> str:
        """
        Format string as a line of progress line.
        Status line is aligned to left using progress line placeholder ``Printer.__PROG_FILL``.
        The length of progress line is optional and will be default length ``Printer.__PROG_LEN`` if not given.
        :param string: String to be formatted.
        :type string: str
        :param length: (Default: None)
        :type length: int
        :return: Formatted string.
        :rtype: str
        """
        length = self.__PROG_LEN if not length else length
        if len(string) >= length:
            return string
        trailing_cnt = length - len(string)
        return f'{string}{self.__PROG_FILL * trailing_cnt} '
    def f_col(self, string: str, col: Type.Col) -> str:
        """
        Colorize string.
        In addition, it gives the string bold style.
        :param string: String to be colorized.
        :type string: str
        :param col: Type of color for colorizing.
        :type col: Type.Col
        :return: Colorized string.
        :rtype: str
        """
        # Anything other than RED falls through to blue.
        if col == Type.Col.RED:
            return self.__RED_TEMP.replace('$1', string)
        else:
            return self.__BLUE_TEMP.replace('$1', string)
    def f_ord(self, n: int) -> str:
        """
        Generate ordinal expression of natural number.
        :param n: Natural number whose ordinal expression is to be generated.
        :type n: int
        :return: Ordinal expression.
        :rtype: str
        """
        # 11, 12, 13 are the exceptions to the 1st/2nd/3rd suffix rule.
        if 11 <= n <= 13:
            return f'{n}th'
        elif n % 10 == 1:
            return f'{n}st'
        elif n % 10 == 2:
            return f'{n}nd'
        elif n % 10 == 3:
            return f'{n}rd'
        else:
            return f'{n}th'
    def f_tb(self, string: List[str], col_width: Union[List[int], int], sep: int = None) -> str:
        """
        Format strings as one table row of centered, upper-cased cells.
        :param string: Cell contents for the row.
        :type string: List[str]
        :param col_width: Column widths; a single int applies to every column.
        :type col_width: Union[List[int], int]
        :param sep: The # of spaces between columns. (Default: None, meaning ``Printer.__TB_SEP``)
        :type sep: int
        :return: Formatted row (leading separator stripped).
        :rtype: str
        """
        if not sep:
            sep = self.__TB_SEP
        if isinstance(col_width, int):
            col_width = [col_width] * len(string)
        buf: str = ''
        for i in range(len(string)):
            if col_width[i] > len(string[i]):
                head_cnt: int = round((col_width[i] - len(string[i])) / 2)
                tail_cnt: int = col_width[i] - len(string[i]) - head_cnt
                buf += ' ' * (head_cnt + sep) + string[i].upper() + ' ' * tail_cnt
            else:
                # Cell wider than its column: emit as-is (still upper-cased).
                buf += ' ' * sep + string[i].upper()
        return buf[sep:]
    def f_hline(self, col_width: Union[List[int], int], col_num: int = None, sep: int = None) -> str:
        """
        Format a horizontal separator line matching a table row.
        :param col_width: Column widths; a single int applies to every column.
        :type col_width: Union[List[int], int]
        :param col_num: The # of columns; only used when ``col_width`` is an int. (Default: None)
        :type col_num: int
        :param sep: The # of spaces between columns. (Default: None, meaning ``Printer.__TB_SEP``)
        :type sep: int
        :return: Formatted line (leading separator stripped).
        :rtype: str
        """
        if not sep:
            sep = self.__TB_SEP
        # NOTE(review): when `col_width` is an int, `col_num` must be provided; passing
        # the default None here raises TypeError — confirm callers always supply it.
        if isinstance(col_width, int):
            col_width = [col_width] * col_num
        buf: str = ''
        for width in col_width:
            buf += ' ' * sep + self.__HLINE_FILL * width
        return buf[sep:]
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,537 | eik4862/TinyCalculator | refs/heads/master | /Core/SystemManager.py | import signal
import sys
from contextlib import contextmanager
from typing import Dict, final, List
from Core import Type
from Error import Error
from Util import Printer
def sigint_handler(sig, frame) -> None:
    """
    Signal handler for interrupt signal (ctrl + C).

    Prints a farewell message and exits the whole process with status 0.

    :param sig: Signal number to be handled.
    :param frame: Frame where signal is sent.
    """
    farewell: str = f'Tiny calculator received SIGINT({sig}). Terminate.'
    print(farewell)
    sys.exit(0)
def sigtstp_handler(sig, frame) -> None:
    """
    Signal handler for stop signal (ctrl + Z).

    Prints a farewell message and exits the whole process with status 0.

    :param sig: Signal number to be handled.
    :param frame: Frame where signal is sent.
    """
    farewell: str = f'Tiny calculator received SIGTSTP({sig}). Terminate.'
    print(farewell)
    sys.exit(0)
def sigalrm_handler(sig, frame) -> None:
    """Handle SIGALRM by raising a system timeout error (armed by ``timeout`` below)."""
    raise Error.SysErr(Type.SysErrT.TIMEOUT)
@contextmanager
def timeout(lim: int) -> None:
    """
    Context manager bounding the wall-clock time of the enclosed block via SIGALRM.

    Registers ``sigalrm_handler``, arms an alarm for ``lim`` seconds (no alarm when
    ``lim <= 0``), and on exit disarms the alarm and resets SIGALRM to be ignored.

    :param lim: Time limit in seconds.
    :type lim: int
    :raise Error.SysErr: When the handler cannot be registered or unregistered.
    """
    try:
        signal.signal(signal.SIGALRM, sigalrm_handler)
    except OSError as os_err:
        raise Error.SysErr(Type.SysErrT.REG_FAIL, sig='SIGALRM', err_str=os_err.strerror)
    if lim > 0:
        signal.alarm(lim)
    try:
        yield
    except TimeoutError:
        # NOTE(review): `sigalrm_handler` raises Error.SysErr, not TimeoutError, so this
        # branch looks unreachable — confirm whether Error.SysErr derives from TimeoutError.
        pass
    finally:
        # Always disarm any pending alarm before leaving the managed block.
        signal.alarm(0)
        try:
            signal.signal(signal.SIGALRM, signal.SIG_IGN)
        except OSError as os_err:
            raise Error.SysErr(Type.SysErrT.UNREG_FAIL, sig='SIGALRM', err_str=os_err.strerror)
@final
class SysManager:
    """
    System manager singleton: holds system variables and registers signal handlers.

    :cvar __inst: Singleton object.
    :ivar __sys_var: System variables keyed by name.
    :ivar __sig_handler: Signal handlers to be registered by ``reg_sighandler``.
    """
    __inst = None
    def __init__(self) -> None:
        self.__sys_var: Dict[str, Type.SysVar] = {
            'Author': Type.SysVar('PSH (lkd1962@naver.com)'),
            'Version': Type.SysVar('0.0.1'),
            'Computation_Timeout': Type.SysVar(3, False),
            'Input_Timeout': Type.SysVar(100, False)
        }
        self.__sig_handler: List[Type.SigHandler] = [
            Type.SigHandler(signal.SIGINT, sigint_handler, 'SIGINT'),
            Type.SigHandler(signal.SIGTSTP, sigtstp_handler, 'SIGTSTP'),
        ]
    def __del__(self) -> None:
        pass
    @classmethod
    def inst(cls):
        """
        Getter for singleton object (lazily initialized).
        :return: Singleton object.
        :rtype: SysManager
        """
        if not cls.__inst:
            cls.__inst = SysManager()
        return cls.__inst
    def reg_sighandler(self, debug: bool = False, buf: Type.BufT = Type.BufT.DEBUG) -> None:
        """
        Register all signal handlers listed in ``__sig_handler``.

        :param debug: When True, buffer detailed progress output to ``buf``. (Default: False)
        :type debug: bool
        :param buf: Buffer for debug output. (Default: Type.BufT.DEBUG)
        :type buf: Type.BufT
        :raise Error.SysErr: When registration of any handler fails.
        """
        if debug:
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('signal handler info'), buf)
            Printer.Printer.inst().buf('@target: ', buf, indent=2)
            for i in range(len(self.__sig_handler)):
                Printer.Printer.inst().buf(f'[{i}] {self.__sig_handler[i].brief}({self.__sig_handler[i].sig})', buf,
                                           indent=4)
            Printer.Printer.inst().buf_newline(buf)
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('registering signal handler'), buf)
            for handler in self.__sig_handler:
                Printer.Printer.inst().buf(
                    Printer.Printer.inst().f_prog(f'Registering {handler.brief} handler'), buf, False, 2)
                try:
                    signal.signal(handler.sig, handler.handler)
                except OSError as os_err:
                    Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                    raise Error.SysErr(Type.SysErrT.REG_FAIL, sig=handler.brief, err_str=os_err.strerror)
                else:
                    Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
                    Printer.Printer.inst().buf(f'@handler: {handler.handler}', buf, indent=4)
            Printer.Printer.inst().buf_newline(buf)
        else:
            for handler in self.__sig_handler:
                try:
                    signal.signal(handler.sig, handler.handler)
                except OSError as os_err:
                    raise Error.SysErr(Type.SysErrT.REG_FAIL, sig=handler.brief, err_str=os_err.strerror)
    def get_sys_var(self, k: str) -> Type.SysVar:
        """
        Look up a system variable by name.
        :return: The system variable, or None when the key is unknown.
        :rtype: Type.SysVar
        """
        return self.__sys_var.get(k)
    def set_sys_var(self, k: str, v: int) -> None:
        """
        Overwrite (or create) a system variable.

        NOTE(review): `Type.SysVar` is called here with three arguments but with one or
        two in ``__init__`` above — confirm the constructor signature accepts both forms.
        """
        self.__sys_var[k] = Type.SysVar(v, Type.T.REAL, False)
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,538 | eik4862/TinyCalculator | refs/heads/master | /Core/Main.py | import sys
from decimal import getcontext
from typing import List, TextIO
from Core import Parser, Type, AST, Interpreter, SystemManager, ErrorManager, DB, WarningManager, TypeChecker
from Error import Error
from Util import Printer
def main(debug: bool = False, verb: bool = False, to: TextIO = sys.stdout) -> None:
"""
Main routine for tiny calculator.
It simply repeats following four steps.
1. Take user input.
2. Parser parse the input, generating AST.
3. Interpreter interpret AST, computing the result.
4. Print out the result.
This method supports modes.
1. Verbose debug mode.
In this mode, it takes test inputs from DB and print out detailed results to stdout.
2. Silent debug mode.
In this mode, it takes test inputs from DB and compare the output with correct output.
Unlike verbose debug mode, it does not print the result to stdout.
Instead, it shows whether the test is passed of failed.
3. Verbose user mode.
In this mode, it takes input from the user through stdin and prints out detailed results to stdout.
4. Silent user mode.
In this mode, it takes input from the user through stdin and prints out simple result to stdout.
This mode is default mode.
:param debug: Flag for debug mode. (Default: False)
:type debug: bool
:param verb: Flag for verbose mode. (Default: False)
:type debug: bool
:param to: File where the result is to be printed out. (Default: sys.stdout)
:type to: TextIO
"""
if debug:
# Attach signal handler.
try:
SystemManager.SysManager.inst().reg_sighandler(True)
except Error.SysErr as sys_err:
# Signal handler registration failure is critical and cannot be recovered.
# Terminate the whole process.
ErrorManager.ErrManager.inst().handle_err(sys_err)
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
sys.exit(1)
else:
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
# Load DB.
try:
DB.DB.inst().load(True)
except Error.DBErr as DB_err:
# DB error is critical and cannot be recovered.
# Terminate the whole process.
ErrorManager.ErrManager.inst().handle_err(DB_err)
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
sys.exit(1)
else:
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
if verb:
for cnt in range(DB.DB.inst().get_sz('debug_in')):
line: str = DB.DB.inst().get_debug_in(cnt) # Input to be tested.
Printer.Printer.inst().buf(f'TEST #{cnt}', Type.BufT.DEBUG)
# Parse and interpret.
try:
expr: AST.AST = Parser.Parser.inst().parse(line, True) # Generated AST.
# Print out buffered output.
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
expr = Interpreter.Interp.inst().interp(expr, True)
except Error.UtilErr as util_err:
if util_err.t == Type.UtilErrT.QUIT:
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('BYE'))
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
Printer.Printer.inst().print(to=to)
sys.exit(0)
ErrorManager.ErrManager.inst().handle_err(util_err)
expr = None
except Error.Err as err:
ErrorManager.ErrManager.inst().handle_err(err)
expr = None
# Process warnings.
for warn in WarningManager.WarnManager.inst().q:
WarningManager.WarnManager.inst().handle_warn(warn)
WarningManager.WarnManager.inst().clr()
# Print out all buffered outputs in right order.
if expr and expr.rt.tok_t != Type.TokT.VOID:
Printer.Printer.inst().buf(expr.infix())
Printer.Printer.inst().buf_newline()
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
Printer.Printer.inst().print(Type.BufT.STDWARN, to=to)
Printer.Printer.inst().print(Type.BufT.INTERNAL, to=to)
Printer.Printer.inst().print(Type.BufT.STDOUT, to=to)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
else:
import time
import re
# Target functions to be tested.
target: List[str] = [
'Parser.__init()',
'Parser.__lexer()',
'Parser.__add_tok()',
'Parser.__infix_to_postfix()',
'AST.__infix_hlpr()',
'AST.__postfix_hlpr()',
'AST.__prefix_hlpr()',
'Interpreter.__chk_t()',
'Interpreter.__simplify()',
'Operator.chk_t()',
'Operator.simplify()',
'Trigonometric.chk_t()',
'Trigonometric.simplify()',
'System.chk_t()'
]
tot: int = DB.DB.inst().get_sz('debug_in') # Total # of tests.
Printer.Printer.inst().buf(Printer.Printer.inst().f_title('debug test info'))
Printer.Printer.inst().buf(f'@size : {tot}', indent=2)
Printer.Printer.inst().buf('@target:', indent=2)
for i in range(len(target)):
Printer.Printer.inst().buf(f'[{i}] {target[i]}', indent=4)
Printer.Printer.inst().buf_newline()
Printer.Printer.inst().buf(Printer.Printer.inst().f_title('start test'))
Printer.Printer.inst().print(to=to)
fail_idx: List[int] = [] # Idx of failed tests.
start: float = time.process_time() # Time stamp for elapsed time measuring.
for cnt in range(DB.DB.inst().get_sz('debug_in')):
line: str = DB.DB.inst().get_debug_in(cnt) # Test input.
out: str = '' # Test output.
Printer.Printer.inst().buf(Printer.Printer.inst().f_prog(f'[{cnt}] Running test case #{cnt}'),
newline=False, indent=2)
# Parse and interpret.
try:
expr: AST.AST = Parser.Parser.inst().parse(line, True) # Generated AST.
# Print out buffered output.
out += Printer.Printer.inst().sprint(Type.BufT.DEBUG)
expr = Interpreter.Interp.inst().interp(expr, True)
except Error.UtilErr as util_err:
# TODO: Here, we need more delicacy
if util_err.t == Type.UtilErrT.QUIT:
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('BYE'), Type.BufT.INTERNAL)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
Printer.Printer.inst().print(to=to)
except Error.Err as err:
ErrorManager.ErrManager.inst().handle_err(err)
expr = None
# Process warnings.
for warn in WarningManager.WarnManager.inst().q:
WarningManager.WarnManager.inst().handle_warn(warn)
WarningManager.WarnManager.inst().clr()
# Print out all buffered outputs in right order.
out += Printer.Printer.inst().sprint(Type.BufT.DEBUG)
out += Printer.Printer.inst().sprint(Type.BufT.STDWARN)
if expr and expr.rt.tok_t != Type.TokT.VOID:
out += f'{expr.infix()}\n\n'
out += Printer.Printer.inst().sprint(Type.BufT.STDERR)
out = re.sub(r'\d+ iteration', '1000 iteration', out)
# Compare the result with answer.
if out == DB.DB.inst().get_debug_out(f'TEST #{cnt}'):
Printer.Printer.inst().buf(Printer.Printer.inst().f_col('pass', Type.Col.BLUE))
else:
Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED))
fail_idx.append(cnt)
Printer.Printer.inst().print(to=to)
cnt += 1
elapsed: float = time.process_time() - start # Elapsed time.
fail: int = len(fail_idx) # # of failed tests.
succ: int = tot - fail # # of passed tests.
Printer.Printer.inst().buf_newline()
Printer.Printer.inst().buf(Printer.Printer.inst().f_title('test finished'))
Printer.Printer.inst().buf(f'@total : {tot:3d}', indent=2)
Printer.Printer.inst().buf(f'@pass : {succ:3d} ({succ / tot * 100:.02f}%)', indent=2)
Printer.Printer.inst().buf(f'@fail : {fail:3d} ({fail / tot * 100:.02f}%)', indent=2)
Printer.Printer.inst().buf(f'@elapsed: {elapsed * 1000:.2f}ms', indent=2)
Printer.Printer.inst().print(to=to)
if fail:
Printer.Printer.inst().buf_newline()
Printer.Printer.inst().buf(Printer.Printer.inst().f_title('fail report'))
for idx in fail_idx:
Printer.Printer.inst().buf(f'@Test #{idx}: {DB.DB.inst().get_debug_in(idx)}', indent=2)
Printer.Printer.inst().print(to=to)
else:
# Attach signal handler.
try:
SystemManager.SysManager.inst().reg_sighandler()
except Error.SysErr as sys_err:
# Signal handler registration failure is critical and cannot be recovered.
# Terminate the whole process.
ErrorManager.ErrManager.inst().handle_err(sys_err)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
sys.exit(1)
# Load DB.
try:
DB.DB.inst().load(False)
except Error.DBErr as DB_err:
# DB error is critical and cannot be recovered.
# Terminate the whole process.
ErrorManager.ErrManager.inst().handle_err(DB_err)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
sys.exit(1)
# Print out greeting message.
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('HELLO'))
Printer.Printer.inst().print(to=to)
if verb:
while True:
try:
with SystemManager.timeout(SystemManager.SysManager.inst().get_sys_var('Input_Timeout').v):
line: str = input('>> ') # User input.
except Error.SysErr as sys_err:
sys_err.err_no = 25
ErrorManager.ErrManager.inst().handle_err(sys_err)
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('BYE'))
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
Printer.Printer.inst().print(to=to)
sys.exit(0)
# Parse and interpret.
try:
expr: AST.AST = Parser.Parser.inst().parse(line, True) # Generated AST.
# Print out buffered output.
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
TypeChecker.TChker.inst().chk_t(expr)
# Print out buffered output.
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
# expr = Interpreter.Interp.inst().interp(expr, True)
except Error.UtilErr as util_err:
if util_err.t == Type.UtilErrT.QUIT:
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('BYE'))
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
Printer.Printer.inst().print(to=to)
sys.exit(0)
ErrorManager.ErrManager.inst().handle_err(util_err)
expr = None
except Error.Err as err:
ErrorManager.ErrManager.inst().handle_err(err)
expr = None
# Process warnings.
for warn in WarningManager.WarnManager.inst().q:
WarningManager.WarnManager.inst().handle_warn(warn)
WarningManager.WarnManager.inst().clr()
if expr:
Printer.Printer.inst().buf(str(expr))
Printer.Printer.inst().buf_newline()
# Print out all buffered outputs in right order.
Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
Printer.Printer.inst().print(Type.BufT.STDWARN, to=to)
Printer.Printer.inst().print(Type.BufT.INTERNAL, to=to)
Printer.Printer.inst().print(Type.BufT.STDOUT, to=to)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
else:
while True:
try:
with SystemManager.timeout(SystemManager.SysManager.inst().get_sys_var('Input_Timeout').v):
line: str = input('>> ') # User input.
except Error.SysErr as sys_err:
sys_err.err_no = 25
ErrorManager.ErrManager.inst().handle_err(sys_err)
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('BYE'))
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
Printer.Printer.inst().print(to=to)
sys.exit(0)
# Parse and interpret.
try:
expr: AST.AST = Parser.Parser.inst().parse(line) # Generated AST.
# expr = Interpreter.Interp.inst().interp(expr)
except Error.UtilErr as util_err:
if util_err.t == Type.UtilErrT.QUIT:
Printer.Printer.inst().buf(DB.DB.inst().get_greet_msg('BYE'))
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
Printer.Printer.inst().print(to=to)
sys.exit(0)
ErrorManager.ErrManager.inst().handle_err(util_err)
expr = None
except Error.Err as err:
ErrorManager.ErrManager.inst().handle_err(err)
expr = None
# Process warnings.
for warn in WarningManager.WarnManager.inst().q:
WarningManager.WarnManager.inst().handle_warn(warn)
WarningManager.WarnManager.inst().clr()
# Print out all buffered outputs in right order.
if expr and expr.rt.tok_t != Type.TokT.VOID:
Printer.Printer.inst().buf(expr.infix())
Printer.Printer.inst().buf_newline()
Printer.Printer.inst().print(Type.BufT.STDWARN, to=to)
Printer.Printer.inst().print(Type.BufT.INTERNAL, to=to)
Printer.Printer.inst().print(Type.BufT.STDOUT, to=to)
Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
# def test(target: Type.FunT = None, verb: bool = False, to: TextIO = sys.stdout) -> None:
# from Test import TestManager
#
# # Load DB.
# try:
# DB.DB.inst().load_test()
# except Error.DBErr as DB_err:
# # DB error is critical and cannot be recovered.
# # Terminate the whole process.
# ErrorManager.ErrManager.inst().handle_err(DB_err)
# Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
# Printer.Printer.inst().print(Type.BufT.STDERR, to=to)
# sys.exit(1)
# else:
# Printer.Printer.inst().print(Type.BufT.DEBUG, to=to)
#
# # Set precision.
# getcontext().prec = 300
#
# # Run test.
# TestManager.TestManager.inst().test(target, verb)
#
# # Print out all buffered outputs in right order.
# Printer.Printer.inst().print(Type.BufT.DEBUG)
if __name__ == '__main__':
    # to = open('../Data/Debug.out', 'w')
    # test()
    # Script entry point.
    # NOTE(review): main() is defined earlier in this file (outside this view);
    # the two positional booleans presumably select the debug/verbose execution
    # paths seen in the body above -- confirm against main()'s signature.
    main(False, True)
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,539 | eik4862/TinyCalculator | refs/heads/master | /Core/Type.py | import sys
from enum import Enum, auto
from typing import final, List, Union, Callable
@final
class Const(Enum):
    """
    Constant types.

    Constants are approximated with 80 significant digits.
    For definition or details on each constant consult the references below.

    **Reference**
    * https://en.wikipedia.org/wiki/Pi
    * https://en.wikipedia.org/wiki/E_(mathematical_constant)
    * https://en.wikipedia.org/wiki/Euler–Mascheroni_constant
    * https://en.wikipedia.org/wiki/Golden_ratio
    * https://en.wikipedia.org/wiki/Golden_angle
    * https://en.wikipedia.org/wiki/Catalan%27s_constant
    * https://en.wikipedia.org/wiki/Glaisher–Kinkelin_constant
    * https://en.wikipedia.org/wiki/Khinchin%27s_constant
    * https://docs.python.org/3/library/sys.html

    :cvar Pi: Pi.
    :cvar E: Base of natural logarithm.
    :cvar Degree: Conversion factor from degree to radian. (pi / 180)
    :cvar EulerGamma: Euler–Mascheroni constant.
    :cvar GoldenRatio: Golden ratio.
    :cvar GoldenAngle: Golden angle.
    :cvar Catalan: Catalan's constant.
    :cvar Glaisher: Glaisher's constant.
    :cvar Khinchin: Khinchin's constant.
    :cvar Eps: Machine epsilon for floating point number.
    :cvar FloatMax: Largest expressible floating point number.
    :cvar FloatMin: Smallest expressible floating point number.
    """
    Pi: float = 3.1415926535897932384626433832795028841971693993751058209749445923078164062862090
    E: float = 2.7182818284590452353602874713526624977572470936999595749669676277240766303535476
    Degree: float = 0.017453292519943295769236907684886127134428718885417254560971914401710091146034494
    EulerGamma: float = 0.57721566490153286060651209008240243104215933593992359880576723488486772677766467
    GoldenRatio: float = 1.6180339887498948482045868343656381177203091798057628621354486227052604628189024
    GoldenAngle: float = 2.3999632297286533222315555066336138531249990110581150429351127507313073382394388
    Catalan: float = 0.91596559417721901505460351493238411077414937428167213426649811962176301977625477
    Glaisher: float = 1.2824271291006226368753425688697917277676889273250011920637400217404063088588265
    Khinchin: float = 2.6854520010653064453097148354817956938203822939944629530511523455572188595371520
    Eps: float = sys.float_info.epsilon
    FloatMax: float = sys.float_info.max
    FloatMin: float = sys.float_info.min
    # Presumably a sentinel marking the end of the constant list -- TODO confirm usage.
    End: float = 0

    def __str__(self) -> str:
        """Return the constant's name (e.g. ``'Pi'``)."""
        return self.name
@final
class BufT(Enum):
    """
    Buffer type for printer module.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar STDOUT: Standard output.
    :cvar STDERR: Standard error.
    :cvar STDWARN: Warning buffer.
    :cvar DEBUG: Debug buffer.
    :cvar INTERNAL: Internal buffer.
    """
    STDOUT = auto()
    STDERR = auto()
    STDWARN = auto()
    DEBUG = auto()
    INTERNAL = auto()
@final
class Col(Enum):
    """
    Color type for printer module.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar RED: Bold red style.
    :cvar BLUE: Bold blue style.
    """
    RED = auto()
    BLUE = auto()
@final
class SysErrT(Enum):
    """
    System error types.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar REG_FAIL: Fail to register signal handler.
    :cvar UNREG_FAIL: Fail to unregister signal handler.
    :cvar TIMEOUT: Given operation exceeded limit computation time.
    """
    REG_FAIL = auto()
    UNREG_FAIL = auto()
    TIMEOUT = auto()
@final
class DBErrT(Enum):
    """
    DB error types.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar OPEN_ERR: Cannot open source file.
    :cvar CLOSE_ERR: Cannot close source file.
    """
    OPEN_ERR = auto()
    CLOSE_ERR = auto()
@final
class UtilErrT(Enum):
    """
    Utility command error type.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar NOT_FOUND: System variable is not found.
    :cvar T_MISMATCH: Type of system variable and given parameter does not match.
    :cvar RD_ONLY: System variable is read only.
    :cvar QUIT: Terminate system.
    :cvar INF_DETECT: Inf is detected as a given parameter.
    :cvar NAN_DETECT: Nan is detected as a given parameter.
    :cvar DOMAIN_OUT: Given parameter is not in domain.
    """
    NOT_FOUND = auto()
    T_MISMATCH = auto()
    RD_ONLY = auto()
    QUIT = auto()
    INF_DETECT = auto()
    NAN_DETECT = auto()
    DOMAIN_OUT = auto()
@final
class InterpWarnT(Enum):
    """
    Interpreter warning type.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar DOMAIN_OUT: Given parameter is not in domain.
    :cvar POLE_DETECT: Mathematical pole is detected.
    :cvar NAN_DETECT: Nan is detected as a given parameter.
    :cvar INF_DETECT: Inf is detected as a given parameter.
    :cvar BIG_INT: Too big integer which cannot be casted to float is detected as a given parameter.
    :cvar SMALL_INT: Too small integer which cannot be casted to float is detected as a given parameter.
    """
    DOMAIN_OUT = auto()
    POLE_DETECT = auto()
    NAN_DETECT = auto()
    INF_DETECT = auto()
    BIG_INT = auto()
    SMALL_INT = auto()
@final
class UtilWarnT(Enum):
    """
    Utility command warning type.

    Members are discriminators only; their ``auto()`` values carry no meaning.

    :cvar DOMAIN_OUT: Given parameter is not in domain.
    :cvar TURN_OFF: Timeout functionality is turned off.
    :cvar INF_DETECT: Inf is detected as a given parameter.
    """
    DOMAIN_OUT = auto()
    TURN_OFF = auto()
    INF_DETECT = auto()
@final
class TestSzT(Enum):
    """
    Test size types.

    Members are discriminators only; their ``auto()`` values carry no meaning.
    Presumably selects the scale of generated test inputs -- TODO confirm against test manager.

    :cvar SMALL: Small test size.
    :cvar MEDIUM: Medium test size.
    :cvar LARGE: Large test size.
    """
    SMALL = auto()
    MEDIUM = auto()
    LARGE = auto()
@final
class FileSrc:
    """
    Describes a single DB source file.

    Each instance is stamped with a unique, monotonically increasing index
    drawn from a class-level counter; the index serves as the file's slot
    in DB storage.

    :cvar __cnt: Class-level counter used to hand out instance indices.
    :ivar __path: Path of the source file.
    :ivar __brief: Short description of the source file.
    :ivar __tag: Whether the source file is a tagged DB source.
    :ivar __idx: Index of the source file in DB storage.
    """
    __cnt: int = 0

    def __init__(self, path: str, brief: str, tag: bool) -> None:
        self.__path: str = path
        self.__brief: str = brief
        self.__tag: bool = tag
        # Snapshot the current counter as this instance's index, then advance it
        # so the next instance receives a distinct index.
        self.__idx: int = FileSrc.__cnt
        FileSrc.inc_cnt()

    def __del__(self) -> None:
        pass

    @classmethod
    def inc_cnt(cls) -> None:
        """
        Advance the class-level index counter by one.
        """
        cls.__cnt += 1

    @property
    def path(self) -> str:
        """
        Getter for path of source file.

        :return: Path of source file.
        :rtype: str
        """
        return self.__path

    @property
    def brief(self) -> str:
        """
        Getter for brief description of source file.

        :return: Brief description.
        :rtype: str
        """
        return self.__brief

    @property
    def tag(self) -> bool:
        """
        Getter for tag flag.

        :return: True if the source file is tagged. False otherwise.
        :rtype: bool
        """
        return self.__tag

    @property
    def idx(self) -> int:
        """
        Getter for index of the source file in DB storage.

        :return: Index in DB storage.
        :rtype: int
        """
        return self.__idx
@final
class SigHandler:
    """
    Bundles a signal number with its handler callable for the system manager.

    :ivar __sig: Signal number to be handled.
    :ivar __handler: Callable invoked when the signal fires.
    :ivar __brief: Short description of the handled signal.
    """

    def __init__(self, sig: int, handler: Callable[..., None], brief: str) -> None:
        self.__sig: int = sig
        self.__handler: Callable[..., None] = handler
        self.__brief: str = brief

    def __del__(self) -> None:
        pass

    @property
    def sig(self) -> int:
        """
        Getter for signal no to be handled.

        :return: Signal no to be handled.
        :rtype: int
        """
        return self.__sig

    @property
    def handler(self) -> Callable[..., None]:
        """
        Getter for signal handler.

        :return: Signal handler.
        :rtype: Callable[..., None]
        """
        return self.__handler

    @property
    def brief(self) -> str:
        """
        Getter for brief description of signal to be handled.

        :return: Brief description.
        :rtype: str
        """
        return self.__brief
@final
class SysVar:
    """
    Holds the value of a single system variable for the system manager.

    :ivar __v: Current value of the system variable.
    :ivar __rd_only: Read only flag. (Default: True)
    """

    def __init__(self, v: Union[str, int], rd_only: bool = True) -> None:
        self.__v: Union[str, int] = v
        self.__rd_only: bool = rd_only

    def __del__(self) -> None:
        pass

    @property
    def v(self) -> Union[str, int]:
        """
        Getter for system variable value.

        :return: System variable value.
        :rtype: Union[str, int]
        """
        return self.__v

    @v.setter
    def v(self, v: Union[int, str]) -> None:
        """
        Setter for system variable value.

        NOTE(review): the read only flag is not enforced here; presumably the
        system manager checks ``rd_only`` before assigning -- confirm at call sites.

        :param v: Value of system variable to be set.
        :type v: Union[int, str]
        """
        self.__v = v

    @property
    def rd_only(self) -> bool:
        """
        Getter for read only flag.

        :return: Read only flag.
        :rtype: bool
        """
        return self.__rd_only
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,540 | eik4862/TinyCalculator | refs/heads/master | /Core/AST.py | from __future__ import annotations
from typing import Dict, Tuple, List
from Core import Token
from Operator import *
class AST:
"""
AST class which supports various string expression generation.
Note that in this program, AST is also expression tree.
For the concept of AST and expression tree, refer to the reference below.
**Reference**
* https://en.wikipedia.org/wiki/Abstract_syntax_tree
* https://en.wikipedia.org/wiki/Binary_expression_tree
:ivar __rt: Root token of AST.
:ivar __line: Original user input string.
"""
__var_tb: Dict[int, str] = {}
def __init__(self, rt, line: str = None) -> None:
self.__rt = rt
self.__line: str = line
def __str__(self) -> str:
"""
Generate infix string expression of AST.
It just calls its helper ``AST.__str_hlpr``.
For detailed description of infix expression generation, refer to the comments in ``AST.__str_hlpr``.
:return: Infix expression.
:rtype: str
"""
return self.__str_hlpr(self.__rt)
@classmethod
def var_name(cls, k: int) -> str:
"""
Find variable from variable table with key.
:param k: Key of variable to be found.
:type k: int
:return: Found variable.
:rtype: str
"""
return cls.__var_tb.get(k)
@classmethod
def add_var(cls, k: int, var: str) -> None:
"""
Add variable with key in variable table.
:param k: Key of variable to be added.
:type k: int
:param var: Variable to be added.
:type var: str
"""
cls.__var_tb[k] = var
def __str_hlpr(self, rt) -> str:
"""
Generate infix expression of partial AST.
For construction, it uses inorder traversal of AST.
It also handles parenthesis properly.
For the concept and implementation of inorder traversal, consult the references below.
This method is private and called internally as a helper of ``AST.__str__``.
**Reference**
* https://en.wikipedia.org/wiki/Tree_traversal#In-order_(LNR)
* https://en.wikipedia.org/wiki/Binary_expression_tree#Infix_traversal
:param rt: Root of partial AST whose infix expression is to be generated.
:type rt: Token.Tok
:return: Infix expression.
:rtype: str
"""
# For infix expression, it must determine whether parenthesis is needed.
# This can be done by comparing the precedence b/w operator.
# If outer precedence of parent operator is higher than inner precedence of child operator, there must be
# parenthesis.
# Otherwise, there is no need of parenthesis.
# Also note that escape sequence of STR token should be unescaped.
# The following logic is an implementation of these rules.
tok_t: type = type(rt)
if tok_t == Token.Op:
buf: str = '' # Buffer for operands.
if rt.v in [Binary.Add, Binary.Mul, Binary.MatMul, Bool.And, Bool.Or, Bool.Xor]:
for tok in rt.chd:
if type(tok) == Token.Op and rt.precd_in > tok.precd_in:
buf += f'({self.__str_hlpr(tok)}) {rt.v.sym()} '
else:
buf += f'{self.__str_hlpr(tok)} {rt.v.sym()} '
return buf[:-len(rt.v.sym()) - 2]
elif rt.v == Binary.Pow or rt.v.__base__ == Assign.AsgnOp:
if type(rt.chd[0]) == Token.Op and rt.precd_in >= rt.chd[0].precd_in:
buf += f'({self.__str_hlpr(rt.chd[0])}) {rt.v.sym()} '
else:
buf += f'{self.__str_hlpr(rt.chd[0])} {rt.v.sym()} '
if type(rt.chd[1]) == Token.Op and rt.precd_in > rt.chd[1].precd_in:
buf += f'({self.__str_hlpr(rt.chd[1])})'
else:
buf += self.__str_hlpr(rt.chd[1])
return buf
elif rt.v in [Unary.Plus, Unary.Minus, Bool.Neg]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
return f'{rt.v.sym()}({self.__str_hlpr(rt.chd[0])})'
else:
return f'{rt.v.sym()}{self.__str_hlpr(rt.chd[0])}'
elif rt.v == Unary.Trans:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in and rt.chd[0].v != Delimiter.Idx:
return f'({self.__str_hlpr(rt.chd[0])})\''
else:
return f'{self.__str_hlpr(rt.chd[0])}\''
elif rt.v == Delimiter.Seq:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf += f'({self.__str_hlpr(rt.chd[0])}):'
else:
buf += f'{self.__str_hlpr(rt.chd[0])}:'
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({self.__str_hlpr(rt.chd[1])})'
else:
buf += self.__str_hlpr(rt.chd[1])
return buf
elif rt.v == Delimiter.Idx:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf += f'({self.__str_hlpr(rt.chd[0])})['
else:
buf += f'{self.__str_hlpr(rt.chd[0])}['
return buf + ', '.join([self.__str_hlpr(tok) for tok in rt.chd[1:]]) + ']'
else:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf += f'({self.__str_hlpr(rt.chd[0])}) {rt.v.sym()} '
else:
buf += f'{self.__str_hlpr(rt.chd[0])} {rt.v.sym()} '
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({self.__str_hlpr(rt.chd[1])})'
else:
buf += self.__str_hlpr(rt.chd[1])
return buf
elif tok_t == Token.Fun:
return rt.v_str() + '[' + ', '.join([self.__str_hlpr(tok) for tok in rt.chd]) + ']'
elif tok_t == Token.List:
return '{' + ', '.join([self.__str_hlpr(tok) for tok in rt.chd]) + '}'
else:
return rt.v_str()
def __str_pos_hlpr(self, rt: Token.Tok, target: Token.Tok) -> Tuple[str, bool, int]:
tok_t: type = type(rt)
if tok_t == Token.Op:
if rt.v == Binary.Pow or rt.v.__base__ in [Assign.AsgnOp]:
tmp_1: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[0], target)
tmp_2: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[1], target)
if rt == target:
if type(rt.chd[0]) == Token.Op and rt.precd_in >= rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
pos: int = tmp_1[2] + 3
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
pos: int = tmp_1[2] + 1
if type(rt.chd[1]) == Token.Op and rt.precd_in > rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, True, pos
elif tmp_1[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in >= rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
pos: int = tmp_1[2] + 1
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
pos: int = tmp_1[2]
if type(rt.chd[1]) == Token.Op and rt.precd_in > rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, True, pos
elif tmp_2[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in >= rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
if type(rt.chd[1]) == Token.Op and rt.precd_in > rt.chd[1].precd_in:
pos: int = len(buf) + tmp_2[2] + 1
buf += f'({tmp_2[0]})'
else:
pos: int = len(buf) + tmp_2[2]
buf += tmp_2[0]
return buf, True, pos
else:
if type(rt.chd[0]) == Token.Op and rt.precd_in >= rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
if type(rt.chd[1]) == Token.Op and rt.precd_in > rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, False, len(buf)
elif rt.v in [Unary.Plus, Unary.Minus, Bool.Neg]:
tmp: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[0], target)
if rt == target:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
return f'{rt.v.sym()}({tmp[0]})', True, 0
else:
return f'{rt.v.sym()}{tmp[0]}', True, 0
elif tmp[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
return f'{rt.v.sym()}({tmp[0]})', True, tmp[2] + 2
else:
return f'{rt.v.sym()}{tmp[0]}', True, tmp[2] + 1
else:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
return f'{rt.v.sym()}({tmp[0]})', False, tmp[2] + 3
else:
return f'{rt.v.sym()}{tmp[0]}', False, tmp[2] + 1
elif rt.v == Unary.Trans:
tmp: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[0], target)
if rt == target:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in and \
rt.chd[0].v != Delimiter.Idx:
return f'({tmp[0]})\'', True, tmp[2] + 2
else:
return f'{tmp[0]}\'', True, tmp[2]
elif tmp[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in and \
rt.chd[0].v != Delimiter.Idx:
return f'({tmp[0]})\'', True, tmp[2] + 1
else:
return f'{tmp[0]}\'', True, tmp[2]
else:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
return f'({tmp[0]})\'', False, tmp[2] + 3
else:
return f'{tmp[0]}\'', False, tmp[2] + 1
elif rt.v == Delimiter.Seq:
tmp_1: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[0], target)
tmp_2: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[1], target)
if rt == target:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}):'
pos: int = tmp_1[2] + 2
else:
buf: str = f'{tmp_1[0]}:'
pos: int = tmp_1[2]
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, True, pos
elif tmp_1[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}):'
pos: int = tmp_1[2] + 1
else:
buf: str = f'{tmp_1[0]}:'
pos: int = tmp_1[2]
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, True, pos
elif tmp_2[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}):'
else:
buf: str = f'{tmp_1[0]}:'
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
pos: int = len(buf) + tmp_2[2] + 1
buf += f'({tmp_2[0]})'
else:
pos: int = len(buf) + tmp_2[2]
buf += tmp_2[0]
return buf, True, pos
else:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}):'
else:
buf: str = f'{tmp_1[0]}:'
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, False, len(buf)
elif rt.v == Delimiter.Idx:
tmp_l: List[Tuple[str, bool, int]] = [self.__str_pos_hlpr(tok, target) for tok in rt.chd]
if rt == target:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_l[0][0]})['
pos: int = tmp_l[0][2] + 2
else:
buf: str = f'{tmp_l[0][0]}['
pos: int = tmp_l[0][2]
return buf + ', '.join([tmp[0] for tmp in tmp_l[1:]]) + ']', True, pos
elif tmp_l[0][1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_l[0][0]})['
pos: int = tmp_l[0][2] + 1
else:
buf: str = f'{tmp_l[0][0]}['
pos: int = tmp_l[0][2]
return buf + ', '.join([tmp[0] for tmp in tmp_l[1:]]) + ']', True, pos
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_l[0][0]})['
else:
buf: str = f'{tmp_l[0][0]}['
pos: int = len(buf)
i: int = 1
while i < len(tmp_l) and not tmp_l[i][1]:
pos += tmp_l[i][2] + 2
i += 1
if i == len(tmp_l):
return buf + ', '.join([tmp[0] for tmp in tmp_l[1:]]) + ']', False, pos - 1 if i > 1 else pos + 1
else:
return buf + ', '.join([tmp[0] for tmp in tmp_l[1:]]) + ']', True, pos
else:
tmp_1: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[0], target)
tmp_2: Tuple[str, bool, int] = self.__str_pos_hlpr(rt.chd[1], target)
if rt == target:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
pos: int = tmp_1[2] + 3
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
pos: int = tmp_1[2] + 1
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, True, pos
elif tmp_1[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
pos: int = tmp_1[2] + 1
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
pos: int = tmp_1[2]
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, True, pos
elif tmp_2[1]:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
pos: int = len(buf) + tmp_2[2] + 1
buf += f'({tmp_2[0]})'
else:
pos: int = len(buf) + tmp_2[2]
buf += tmp_2[0]
return buf, True, pos
else:
if type(rt.chd[0]) == Token.Op and rt.precd_in > rt.chd[0].precd_in:
buf: str = f'({tmp_1[0]}) {rt.v.sym()} '
else:
buf: str = f'{tmp_1[0]} {rt.v.sym()} '
if type(rt.chd[1]) == Token.Op and rt.precd_in >= rt.chd[1].precd_in:
buf += f'({tmp_2[0]})'
else:
buf += tmp_2[0]
return buf, False, len(buf)
elif tok_t == Token.Fun:
tmp_l: List[Tuple[str, bool, int]] = [self.__str_pos_hlpr(tok, target) for tok in rt.chd]
if rt == target:
return rt.v_str() + '[' + ', '.join([tmp[0] for tmp in tmp_l]) + ']', True, 0
buf: str = rt.v_str() + '['
pos: int = len(buf)
i: int = 0
while i < len(tmp_l) and not tmp_l[i][1]:
pos += tmp_l[i][2] + 2
i += 1
print(pos)
if i == len(tmp_l):
return buf + ', '.join([tmp[0] for tmp in tmp_l]) + ']', False, pos - 1 if i > 0 else pos + 1
else:
return buf + ', '.join([tmp[0] for tmp in tmp_l]) + ']', True, pos + tmp_l[i][2]
elif tok_t == Token.List:
tmp_l: List[Tuple[str, bool, int]] = [self.__str_pos_hlpr(tok, target) for tok in rt.chd]
if rt == target:
return '{' + ', '.join([tmp[0] for tmp in tmp_l]) + '}', True, 0
buf: str = '{'
pos: int = 1
i: int = 0
while i < len(tmp_l) and not tmp_l[i][1]:
pos += tmp_l[i][2] + 2
i += 1
if i == len(tmp_l):
return buf + ', '.join([tmp[0] for tmp in tmp_l]) + '}', False, pos - 1 if i > 0 else pos + 1
else:
return buf + ', '.join([tmp[0] for tmp in tmp_l]) + '}', True, pos
else:
buf: str = rt.v_str()
return (buf, True, 0) if rt == target else (buf, False, len(buf))
    @property
    def rt(self) -> Token.Tok:
        """
        Getter for root token of AST.

        :return: Root token.
        :rtype: Token.Tok
        """
        return self.__rt
    @property
    def line(self) -> str:
        """
        Getter for original user input string from which this AST was built.

        :return: Original input string.
        :rtype: str
        """
        return self.__line
    @rt.setter
    def rt(self, tok: Token.Tok) -> None:
        """
        Setter for root token of AST.

        :param tok: Token to be set as a root.
        :type tok: Token.Tok
        """
        self.__rt = tok
def str_pos(self, tok: Token.Tok) -> Tuple[str, int]:
res = self.__str_pos_hlpr(self.__rt, tok)
return res[0], res[2]
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,541 | eik4862/TinyCalculator | refs/heads/master | /Core/Parser.py | from __future__ import annotations
import math
from typing import List, final, Union, Dict
from Core import Token, AST, Type, WarningManager
from Error import *
from Warning import *
from Operator import *
from Function import *
from Util import Printer
from Util.Macro import *
@final
class Parser:
"""
Parse user input string to generate AST(Abstract Syntax Tree).
This class is implemented as singleton.
For the concept of AST and singleton pattern, consult the references below.
**Reference**
* https://en.wikipedia.org/wiki/Abstract_syntax_tree
* https://en.wikipedia.org/wiki/Singleton_pattern
:cvar __inst: Singleton object.
:ivar __line: Original user input string.
:ivar __infix: Storage for tokens in infix order.
:ivar __postfix: Storage for tokens in postfix order.
:ivar __tmp_stk: Temporary stack for infix to postfix conversion and AST generation.
"""
__inst: Parser = None
__kword_tb: Dict[str, Union[float, bool, Function.Fun]] = {}
def __init__(self) -> None:
self.__line: str = ''
self.__infix: List[Token.Tok] = []
self.__postfix: List[Token.Tok] = []
self.__tmp_stk: List[Token.Tok] = []
for fun_category in Function.Fun.__subclasses__():
for fun in fun_category.__subclasses__():
self.__kword_tb[fun.__name__] = fun
for const in Type.Const:
self.__kword_tb[const.name] = const.value
self.__kword_tb['True'] = True
self.__kword_tb['False'] = False
def __init(self) -> None:
"""
Initialize parser.
Clear internal buffers to store tokens.
This method is private and called internally as the first step of parsing chain.
For detailed description for parsing chain, refer to the comments of ``Parser.parse``.
"""
self.__infix.clear()
self.__postfix.clear()
def __lexer(self) -> None:
"""
Lexer(lexical analyzer) for parsing.
Read character from target string one by one and tokenize it properly.
Further, it checks the syntax of input using ``Parser.__add_tok``.
For detailed description for syntax checking, refer to the comments of ``Parser.__add_tok``.
This method is private and called internally as the second step of parsing chain.
For detailed description for parsing chain, refer to the comments of ``Parser.parse``.
:raise BIG_INT: If parsed integer is bigger than the maximum float size.
:raise OVERFLOW: If parsed float caused overflow.
:raise INVALID_EXPR: If the input string is invalid expression.
:raise INVALID_TOK: If unknown token is encountered.
:raise EMPTY_EXPR: If the input expression is void.
"""
pos: int = 0 # Current position at the string to be tokenized.
while pos < len(self.__line):
if is_white(self.__line[pos]):
# Skip all white spaces
while pos < len(self.__line) and is_white(self.__line[pos]):
pos += 1
elif is_digit(self.__line[pos]):
# Parsing numeric value with integer part comprises of four steps.
# 1. Parse integer part.
# 2. Check for decimal point and parse fractional part.
# 3. Check for additional exponentation and parse exponent.
# 4. Check for imaginary unit and parse it.
# There are following restriction for numeric value literals.
# 1. Decimal point cannot appear more than once.
# 2. Additional exponentation must be followed by integer.
# 3. There can be only one sign after additional exponentation.
# 4. Imaginary unit, if exists, must be the terminal of the numeric value.
# This logic generates warning in following cases.
# 1. If the parsed integer part is too big so that it cannot be casted to float, it generates BIT_INT
# warning.
# 2. If overflow occurs, it generates OVERFLOW warning.
# The following logic is an implementation of these steps, restriction rules, and warning generation
# rules.
start: int = pos # Starting position.
# Parse integer part.
while pos < len(self.__line) and is_digit(self.__line[pos]):
pos += 1
if pos == len(self.__line) or not (
is_dot(self.__line[pos]) or is_exp(self.__line[pos]) or is_imag(self.__line[pos])):
# If there is nothing more, we are done.
parsed: int = int(self.__line[start:pos]) # Parsed numeric.
if is_bigint(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(58, start))
self.__add_tok(Token.Num(parsed, start))
continue
elif is_dot(self.__line[pos]):
# If there is decimal point, parse fractional part.
pos += 1
# Parse fractional part.
while pos < len(self.__line) and is_digit(self.__line[pos]):
pos += 1
if pos == len(self.__line) or not (
is_dot(self.__line[pos]) or is_exp(self.__line[pos]) or is_imag(self.__line[pos])):
# If there is no additional exponentation, stop.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
elif is_dot(self.__line[pos]):
# Decimal point cannot appear more than once.
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
elif is_exp(self.__line[pos]):
# If there is additional exponentation, parse exponent.
pos += 1
# Additional exponent must be followed by integer.
if pos == len(self.__line):
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
elif is_sgn(self.__line[pos]):
if pos + 1 == len(self.__line[pos]) or not is_digit(self.__line[pos + 1]):
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
pos += 1
elif not is_digit(self.__line[pos]):
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
# Parse additional exponent.
while pos < len(self.__line) and is_digit(self.__line[pos]):
pos += 1
if pos == len(self.__line) or not is_imag(self.__line[pos]):
# If there is nothing more, we are done.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
else:
# If there is imaginary unit, parse it.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(complex(0, parsed), start))
pos += 1
continue
else:
# If there is imaginary unit, parse it.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(complex(0, parsed), start))
pos += 1
continue
elif is_exp(self.__line[pos]):
# If there is additional exponentation, parse exponent.
pos += 1
# Additional exponent must be followed by integer.
if pos == len(self.__line):
pos -= 1
parsed: int = int(self.__line[start:pos]) # Parsed numeric.
if is_bigint(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(58, start))
self.__add_tok(Token.Num(parsed, start))
continue
elif is_sgn(self.__line[pos]):
if pos + 1 == len(self.__line[pos]) or not is_digit(self.__line[pos + 1]):
pos -= 1
parsed: int = int(self.__line[start:pos]) # Parsed numeric.
if is_bigint(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(58, start))
self.__add_tok(Token.Num(parsed, start))
continue
pos += 1
elif not is_digit(self.__line[pos]):
pos -= 1
parsed: int = int(self.__line[start:pos]) # Parsed numeric.
if is_bigint(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(58, start))
self.__add_tok(Token.Num(parsed, start))
continue
# Parse additional exponent.
while pos < len(self.__line) and is_digit(self.__line[pos]):
pos += 1
if pos == len(self.__line) or not is_imag(self.__line[pos]):
# If there is nothing more, we are done.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
else:
# If there is imaginary unit, parse it.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(complex(0, parsed), start))
pos += 1
continue
else:
# If there is imaginary unit, parse it.
parsed: int = int(self.__line[start:pos]) # Parsed numeric.
if is_bigint(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
parsed = math.inf
self.__add_tok(Token.Num(complex(0, parsed), start))
pos += 1
continue
elif is_dot(self.__line[pos]):
# Parsing numeric value w/o integer part comprises of three steps.
# 1. Parse fractional part.
# 2. Check for additional exponentation and parse exponent.
# 3. Check for imaginary unit and parse it.
# There are following restriction for numeric value literals.
# 1. Decimal point cannot appear more than once.
# 2. Additional exponentation must be followed by integer.
# 3. There can be only one sign after additional exponentation.
# 4. Imaginary unit, if exists, must be the terminal of the numeric value.
# This logic generates warning in following cases.
# 1. If overflow occurs, it generates OVERFLOW warning.
# The following logic is an implementation of these steps, restriction rules, and warning generation
# rules.
start: int = pos # Starting position.
pos += 1
if pos == len(self.__line) or not is_digit(self.__line[pos]):
raise ParserError.InvalidTok(2, self.__line, start)
# Parse fractional part.
while pos < len(self.__line) and is_digit(self.__line[pos]):
pos += 1
if pos == len(self.__line) or not (
is_dot(self.__line[pos]) or is_exp(self.__line[pos]) or is_imag(self.__line[pos])):
# If there is no additional exponentation, stop.
self.__add_tok(Token.Num(float(self.__line[start:pos]), start))
continue
elif is_dot(self.__line[pos]):
# Decimal point cannot appear more than once.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
elif is_exp(self.__line[pos]):
# If there is additional exponentation, parse exponent.
pos += 1
# Additional exponent must be followed by integer.
if pos == len(self.__line):
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
elif is_sgn(self.__line[pos]):
if pos + 1 == len(self.__line[pos]) or not is_digit(self.__line[pos + 1]):
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
pos += 1
elif not is_digit(self.__line[pos]):
pos -= 1
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
# Parse additional exponent.
while pos < len(self.__line) and is_digit(self.__line[pos]):
pos += 1
if pos == len(self.__line) or not is_imag(self.__line[pos]):
# If there is nothing more, we are done.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(parsed, start))
continue
else:
# If there is imaginary unit, parse it.
parsed: float = float(self.__line[start:pos]) # Parsed numeric.
if math.isinf(parsed):
WarningManager.WarnManager.inst().push(ParserWarning.NumWarn(59, start))
self.__add_tok(Token.Num(complex(0, parsed), start))
pos += 1
continue
else:
self.__add_tok(Token.Num(complex(0, float(self.__line[start:pos])), start))
pos += 1
continue
elif is_alpha(self.__line[pos]):
# Parse function/command/constant/variable.
# Variable name has following rules.
# 1. It consists of alphabets, digits, and underscore.
# 2. It must start with alphabet.
# 3. It cannot terminate with underscore.
# Since it cannot determine whether it means boolean/function/command/constant/variable, it searches
# for DB after parsing.
# For efficiency, parsed string for variable will be hashed.
start: int = pos # Starting position.
pos += 1
while (pos < len(self.__line) and (
is_alpha(self.__line[pos]) or is_digit(self.__line[pos]) or is_underscore(self.__line[pos]))):
pos += 1
if is_underscore(self.__line[pos - 1]):
pos -= 1
# Check whether parsed symbol is function.
find: Union[float, bool, Function.Fun] = self.__kword_tb.get(self.__line[start:pos])
if type(find) == bool:
self.__add_tok(Token.Bool(find, start))
elif type(find) == float:
self.__add_tok(Token.Num(find, start))
elif type(find) == type:
self.__add_tok(Token.Fun(find, start))
else:
str_hash: int = hash(self.__line[start:pos]) # Hash value of parsed string.
self.__add_tok(Token.Var(str_hash, start))
if not AST.AST.var_name(str_hash):
AST.AST.add_var(str_hash, self.__line[start:pos])
continue
elif is_quote(self.__line[pos]):
# Parse string.
# Note that string must be enclosed by double quote.
# Also, following escaping sequences should be handled.
# 1. \n for newline.
# 2. \t for tab.
# 3. \\ for backslash.
# 4. \" for double quote.
start: int = pos # Starting position.
pos += 1
parsed: str = ''
while pos < len(self.__line) and not is_quote(self.__line[pos]):
if self.__line[pos] == '\\':
# Handle escaping sequence by lookahead one character.
if pos + 1 == len(self.__line):
raise ParserError.InvalidExpr(32, self.__line, pos)
if self.__line[pos + 1] == 'n':
parsed += '\n'
elif self.__line[pos + 1] == 't':
parsed += '\t'
elif self.__line[pos + 1] == '\\':
parsed += '\\'
elif self.__line[pos + 1] == '"':
parsed += '"'
else:
raise ParserError.InvalidExpr(32, self.__line, pos)
pos += 1
else:
parsed += self.__line[pos]
pos += 1
# Double quote must be closed.
if pos == len(self.__line):
raise ParserError.InvalidExpr(1, self.__line, start)
self.__add_tok(Token.Str(parsed, start))
pos += 1
continue
else:
# Parse operator.
# Some notes to make.
# 1. It cannot determine whether + means Add or Plus.
# Just try as Add token and let ``Parser.__add_tok`` to determine this.
# 2. It cannot determine whether - means Sub or Minus.
# Just try as Sub token and let ``Parser.__add_tok`` to determine this.
# 3. It cannot determine whether [ and ] are used as function call or indexing operation.
# It will be determined by ``Parser.__infix_to_postfix`` later.
# 4. It cannot determine whether : is binary or ternary operator.
# Just try as binary operator and let ``Parser.__infix_to_postfix`` to determine this.
if self.__line[pos] == '+':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.AddAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Binary.Add, pos))
pos += 1
continue
elif self.__line[pos] == '-':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.SubAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Binary.Sub, pos))
pos += 1
continue
elif self.__line[pos] == '*':
if pos + 2 < len(self.__line) and self.__line[pos + 1:pos + 3] == '*=':
self.__add_tok(Token.Op(Assign.PowAsgn, pos))
pos += 3
elif pos + 1 < len(self.__line):
if self.__line[pos + 1] == '*':
self.__add_tok(Token.Op(Binary.Pow, pos))
pos += 2
elif self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.MulAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Binary.Mul, pos))
pos += 1
else:
self.__add_tok(Token.Op(Binary.Mul, pos))
pos += 1
continue
elif self.__line[pos] == '/':
if pos + 2 < len(self.__line) and self.__line[pos + 1: pos + 3] == '/=':
self.__add_tok(Token.Op(Assign.QuotAsgn, pos))
pos += 3
if pos + 1 < len(self.__line):
if self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.DivAsgn, pos))
pos += 2
elif self.__line[pos + 1] == '/':
self.__add_tok(Token.Op(Binary.Quot, pos))
pos += 2
else:
self.__add_tok(Token.Op(Binary.Div, pos))
pos += 1
else:
self.__add_tok(Token.Op(Binary.Div, pos))
pos += 1
continue
elif self.__line[pos] == '%':
if pos + 3 < len(self.__line) and self.__line[pos + 1:pos + 4] == '*%=':
self.__add_tok(Token.Op(Assign.MatMulAsgn, pos))
pos += 4
elif pos + 2 < len(self.__line) and self.__line[pos + 1:pos + 3] == '*%':
self.__add_tok(Token.Op(Binary.MatMul, pos))
pos += 3
elif pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.RemAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Binary.Rem, pos))
pos += 1
continue
elif self.__line[pos] == '\'':
self.__add_tok(Token.Op(Unary.Trans, pos))
pos += 1
continue
elif self.__line[pos] == '!':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Compare.Diff, pos))
pos += 2
else:
self.__add_tok(Token.Op(Bool.Neg, pos))
pos += 1
continue
elif self.__line[pos] == '&':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.AndAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Bool.And, pos))
pos += 1
continue
elif self.__line[pos] == '|':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.OrAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Bool.Or, pos))
pos += 1
continue
elif self.__line[pos] == '^':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Assign.XorAsgn, pos))
pos += 2
else:
self.__add_tok(Token.Op(Bool.Xor, pos))
pos += 1
continue
elif self.__line[pos] == '<':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Compare.Geq, pos))
pos += 2
else:
self.__add_tok(Token.Op(Compare.Abv, pos))
pos += 1
continue
elif self.__line[pos] == '>':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Compare.Leq, pos))
pos += 2
else:
self.__add_tok(Token.Op(Compare.Blw, pos))
pos += 1
continue
elif self.__line[pos] == '=':
if pos + 1 < len(self.__line) and self.__line[pos + 1] == '=':
self.__add_tok(Token.Op(Compare.Eq, pos))
pos += 2
else:
self.__add_tok(Token.Op(Assign.Asgn, pos))
pos += 1
continue
elif self.__line[pos] == '(':
self.__add_tok(Token.Op(Delimiter.Lpar, pos))
pos += 1
continue
elif self.__line[pos] == ')':
self.__add_tok(Token.Op(Delimiter.Rpar, pos))
pos += 1
continue
elif self.__line[pos] == '[':
self.__add_tok(Token.Op(Delimiter.SqrLpar, pos))
pos += 1
continue
elif self.__line[pos] == ']':
self.__add_tok(Token.Op(Delimiter.SqrRpar, pos))
pos += 1
continue
elif self.__line[pos] == '{':
self.__add_tok(Token.Op(Delimiter.CrlLpar, pos))
pos += 1
continue
elif self.__line[pos] == '}':
self.__add_tok(Token.Op(Delimiter.CrlRpar, pos))
pos += 1
continue
elif self.__line[pos] == ':':
self.__add_tok(Token.Op(Delimiter.Seq, pos))
pos += 1
continue
elif self.__line[pos] == ',':
self.__add_tok(Token.Op(Delimiter.Com, pos))
pos += 1
continue
# Unknown token is encountered.
raise ParserError.InvalidTok(2, self.__line, pos)
# Check whether expression is void.
if not self.__infix:
raise ParserError.EmptyExpr(3)
# By adding terminal token, check terminal condition of expression.
self.__add_tok(Token.Ter())
    def __add_tok(self, tok: Token.Tok) -> None:
        """
        Add token to internal buffer.

        Before it adds token, it checks the syntax.
        If it detects syntax error, it raises exception.
        Further, based on the previously added token, it determines whether +/- are Plus/Minus or Add/Sub, resp.
        And based on the previously added token again, it detects the need of implicit multiplication and adds *
        between them to make it explicit.

        This method is private and called internally as a helper of ``Parser.__lexer``.
        For detailed description for lexing, refer to the comments of ``Parser.__lexer``.

        :param tok: Token to be added.
        :type tok: Token.Tok

        :raise INVALID_EXPR: If the input string is invalid expression.
        """
        curr_t: type = type(tok)  # Token type of token to be added.
        curr_v = tok.v  # Token value of token to be added.

        # Starting condition for expression is as follows.
        #   1. It can start with any of Num/Var/Fun/Str/Bool token.
        #   2. It can start with Add/Sub/Neg/Lpar/CrlLpar token.
        #      But Add/Sub here means Plus/Minus, resp.
        # All other cases are illegal.
        # The following logic is an implementation of these rules.
        if not self.__infix:
            if curr_t == Token.Op:
                if curr_v == Binary.Add:
                    # Leading + is the unary Plus.
                    tok.v = Unary.Plus
                    self.__infix.append(tok)

                    return
                elif curr_v == Binary.Sub:
                    # Leading - is the unary Minus.
                    tok.v = Unary.Minus
                    self.__infix.append(tok)

                    return
                elif curr_v in [Bool.Neg, Delimiter.Lpar, Delimiter.CrlLpar]:
                    self.__infix.append(tok)

                    return
                else:
                    raise ParserError.InvalidExpr(7, self.__line, tok.pos)
            else:
                self.__infix.append(tok)

                return

        prev_t: type = type(self.__infix[-1])  # Token type of previously added token.
        prev_v = self.__infix[-1].v  # Value of previously added token.

        # Terminal condition for expression is as follows.
        #   1. It can terminate with any of Num/Var/Str/Bool token.
        #   2. If can terminate with Trans/Rpar/SqrRpar/CrlRpar token.
        # All other cases are illegal.
        # The following logic is an implementation of these rules.
        # Note that TER token is not added to the infix array.
        if curr_t == Token.Ter:
            if prev_t == Token.Fun:
                raise ParserError.InvalidExpr(4, self.__line, self.__infix[-1].pos)
            elif prev_t == Token.Op:
                if prev_v not in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                    raise ParserError.InvalidExpr(5, self.__line, self.__infix[-1].pos)

            return

        if curr_t == Token.Op:
            # Adjacent rule for OP token is as follows.
            #   1. It can adjacent with any of Num/Var/Str/Bool.
            #      But if current OP token is Neg/Lpar/CrlLpar, it needs implicit multiplication b/w them.
            #   2. If previous one is Fun token and current one is SqrLpar, they can adjacent.
            #   3. If previous one is OP token,
            #     3.1. If current one is Add/Sub, it can adjacent with any OP token.
            #          But if previous one is not Trans/Rpar/SqrRpar/CrlRpar, +/- here means Plus/Minus, resp.
            #     3.2. If current one is Neg/Lpar/CrlLpar, it can adjacent with any OP token.
            #          But if previous one is Trans/Rpar/SqrRpar/CrlRpar, it needs implicit multiplication b/w
            #          them.
            #     3.3. If current one is SqrRpar/Com and previous one is Trans/Rpar/SqrLpar/SqrRpar/CrlRpar/Com,
            #          they can adjacent.
            #          But if previous one is SqrLpar/Com, it needs Void token b/w them.
            #     3.4. If current one is CrlRpar and previous one is Trans/Rpar/SqrRpar/CrlLpar/CrlRpar, they
            #          can adjacent.
            #          But if previous one is CrlLpar, it needs Void token b/w them.
            #     3.5. If current one is not Add/Sub/Neg/Lpar/SqrLpar/SqrRpar/CrlLpar/CrlRpar/Com and previous
            #          one is Trans/Rpar/SqrRpar/CrlRpar, they can adjacent.
            # All other cases are illegal.
            # The following logic is an implementation of these rules.
            if prev_t == Token.Op:
                if curr_v == Binary.Add:
                    # + after a non-closing operator is the unary Plus.
                    if prev_v not in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                        tok.v = Unary.Plus

                    self.__infix.append(tok)

                    return
                elif curr_v == Binary.Sub:
                    # - after a non-closing operator is the unary Minus.
                    if prev_v not in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                        tok.v = Unary.Minus

                    self.__infix.append(tok)

                    return
                elif curr_v in [Bool.Neg, Delimiter.Lpar, Delimiter.CrlLpar]:
                    # After a closing operator, e.g. ``)(``, multiplication is implied.
                    if prev_v in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                        self.__infix.append(Token.Op(Binary.Mul))

                    self.__infix.append(tok)

                    return
                elif curr_v in [Delimiter.SqrRpar, Delimiter.Com]:
                    if prev_v in [Delimiter.SqrLpar, Delimiter.Com]:
                        # Empty argument slot, e.g. ``f[]`` or ``f[a,,b]``: insert a Void placeholder.
                        self.__infix.append(Token.Void())
                        self.__infix.append(tok)

                        return
                    elif prev_v in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                        self.__infix.append(tok)

                        return
                    else:
                        raise ParserError.InvalidExpr(11, self.__line, tok.pos, (prev_v, curr_v))
                elif curr_v == Delimiter.CrlRpar:
                    if prev_v == Delimiter.CrlLpar:
                        # Empty list literal ``{}``: insert a Void placeholder.
                        self.__infix.append(Token.Void())
                        self.__infix.append(tok)

                        return
                    elif prev_v in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                        self.__infix.append(tok)

                        return
                    else:
                        raise ParserError.InvalidExpr(11, self.__line, tok.pos, (prev_v, curr_v))
                else:
                    if prev_v in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                        self.__infix.append(tok)

                        return

                    raise ParserError.InvalidExpr(11, self.__line, tok.pos, (prev_v, curr_v))
            elif prev_t == Token.Fun:
                # A function may only be followed by its opening bracket [.
                if curr_v == Delimiter.SqrLpar:
                    self.__infix.append(tok)

                    return

                raise ParserError.InvalidExpr(9, self.__line, tok.pos)
            else:
                # Previous token is a value (Num/Var/Str/Bool): Neg/Lpar/CrlLpar imply multiplication.
                if curr_v in [Bool.Neg, Delimiter.Lpar, Delimiter.CrlLpar]:
                    self.__infix.append(Token.Op(Binary.Mul))

                self.__infix.append(tok)

                return
        else:
            # Adjacent rule for Num/Var/Fun/Str/Bool token is as follows.
            #   1. It can adjacent with any of Num/Var/Str/Bool token but it needs implicit multiplication b/w
            #      them.
            #   2. It can adjacent with any of OP token, but if previous OP token is Trans/Rpar/SqrRpar/CrlRpar,
            #      it needs implicit multiplication b/w them.
            # All other cases are illegal.
            # The following logic is an implementation of these rules.
            if prev_t == Token.Op:
                if prev_v in [Unary.Trans, Delimiter.Rpar, Delimiter.SqrRpar, Delimiter.CrlRpar]:
                    self.__infix.append(Token.Op(Binary.Mul))

                self.__infix.append(tok)

                return
            elif prev_t == Token.Fun:
                raise ParserError.InvalidExpr(9, self.__line, tok.pos)
            else:
                self.__infix.append(Token.Op(Binary.Mul))
                self.__infix.append(tok)

                return
    def __infix_to_postfix(self) -> None:
        """
        Convert infix-ordered tokens to postfix-order.

        For conversion, it uses two-stack approach described in the reference below.
        With deliberately assigned inner and outer precedence of operators and functions, this conversion takes
        account for precedence and association rule b/w them.
        Thus after conversion, there is no parentheses and delimiters.

        This method is private and called internally as the third step of parsing chain.
        For detailed description for parsing chain, refer to the comments of ``Parser.parse``.

        **Reference**
        * https://www.geeksforgeeks.org/infix-to-postfix-using-different-precedence-values-for-in-stack-and-out-stack

        :raise INVALID_EXPR: If the input string is invalid expression.
        """
        assert self.__infix

        self.__tmp_stk.clear()

        for tok in self.__infix:
            tok_t: type = type(tok)  # Token type of the current token.

            if tok_t == Token.Op:
                # If Rpar token is encountered, this means that parenthesis is closed.
                # Thus it pops tokens from temporary stack and add them to postfix list until matching Lpar
                # token appears.
                # However, if matching parenthesis does not appear or SqrLpar/CrlLpar/Com token comes first,
                # then this implies that there is some parenthesis matching problem.
                if tok.v == Delimiter.Rpar:
                    while self.__tmp_stk:
                        if self.__tmp_stk[-1].v in [Delimiter.Lpar, Delimiter.SqrLpar, Delimiter.CrlLpar,
                                                    Delimiter.Com]:
                            break

                        self.__postfix.append(self.__tmp_stk.pop())

                    if not (self.__tmp_stk and self.__tmp_stk[-1].v == Delimiter.Lpar):
                        raise ParserError.InvalidExpr(20, self.__line, tok.pos)

                    # Discard the matching Lpar; parentheses do not survive the conversion.
                    self.__tmp_stk.pop()

                    continue
                elif tok.v == Delimiter.SqrRpar:
                    # If SqrRpar token is encountered, it pops tokens in temporary stack and add them to
                    # postfix list as it did for Rpar token.
                    # Again, if matching parenthesis does not appear or Lpar/CrlLpar comes first, then this
                    # implies that there is some parenthesis matching problem.
                    # However, there are two slight differences this time.
                    # First, by counting the # of Com tokens encountered during popping, it determines the # of
                    # parameters for function or indexing operation.
                    # But since 0 parameter and 1 parameter cannot be differentiated by the # of Com tokens, it
                    # checks whether the top of postfix list is Void token or not before popping.
                    # The existence of Void token with no encounter of Com token implies 0 parameter.
                    # Second, it determines whether SqrLpar and SqrRpar tokens are used to function call or
                    # indexing operation by inspecting the top token in temporary stack after popping.
                    # It is function call iff there exists Fun token.
                    # NOTE(review): this indexes ``self.__postfix[-1]`` without an emptiness guard; the lexer's
                    # syntax checks appear to guarantee a preceding operand here — confirm before relying on it.
                    void_flag: bool = (type(self.__postfix[-1]) == Token.Void)  # Void function flag.
                    void_pos: int = len(self.__postfix) - 1  # Idx of void token.
                    argc: int = 1  # # of arguments.

                    while self.__tmp_stk:
                        if self.__tmp_stk[-1].v in [Delimiter.Lpar, Delimiter.SqrLpar, Delimiter.CrlLpar]:
                            break
                        elif self.__tmp_stk[-1].v == Delimiter.Com:
                            argc += 1
                            self.__tmp_stk.pop()
                        else:
                            self.__postfix.append(self.__tmp_stk.pop())

                    if not (self.__tmp_stk and self.__tmp_stk[-1].v == Delimiter.SqrLpar):
                        raise ParserError.InvalidExpr(20, self.__line, tok.pos)

                    match: Token.Tok = self.__tmp_stk.pop()  # Matched parenthesis.
                    # A lone Void operand with no commas means an empty argument list.
                    argc = 0 if void_flag and argc == 1 else argc

                    if self.__tmp_stk and type(self.__tmp_stk[-1]) == Token.Fun:
                        # Function call: attach the argument count to the Fun token.
                        self.__tmp_stk[-1].argc = argc
                        self.__postfix.append(self.__tmp_stk.pop())
                    else:
                        # Indexing operation: the indexed object itself counts as one extra operand.
                        self.__postfix.append(Token.Op(Delimiter.Idx, match.pos))
                        self.__postfix[-1].argc = argc + 1

                    if argc == 0:
                        del self.__postfix[void_pos]

                    continue
                elif tok.v == Delimiter.CrlRpar:
                    # If CrlRpar token is encountered, it pops tokens in temporary stack and add them to
                    # postfix list as it did for SqrRpar token.
                    # Again, if matching parenthesis does not appear or Lpar/SqrLpar token comes first, then
                    # this implies that there is some parenthesis matching problem.
                    # One slight difference this time is that, List token will be added to postfix list after
                    # popping.
                    void_flag: bool = (type(self.__postfix[-1]) == Token.Void)  # Empty list flag.
                    void_pos: int = len(self.__postfix) - 1  # Idx of void token.
                    sz: int = 1  # Size of list.

                    while self.__tmp_stk:
                        if self.__tmp_stk[-1].v in [Delimiter.Lpar, Delimiter.SqrLpar, Delimiter.CrlLpar]:
                            break
                        elif self.__tmp_stk[-1].v == Delimiter.Com:
                            sz += 1
                            self.__tmp_stk.pop()
                        else:
                            self.__postfix.append(self.__tmp_stk.pop())

                    if not (self.__tmp_stk and self.__tmp_stk[-1].v == Delimiter.CrlLpar):
                        raise ParserError.InvalidExpr(20, self.__line, tok.pos)

                    # A lone Void operand with no commas means an empty list literal.
                    sz = 0 if void_flag and sz == 1 else sz
                    self.__postfix.append(Token.List(self.__tmp_stk[-1].pos, sz))
                    self.__tmp_stk.pop()

                    if sz == 0:
                        del self.__postfix[void_pos]

                    continue
                elif tok.v == Delimiter.Com:
                    # If Com token is encountered, it pops tokens in temporary stack and add them to postfix
                    # list until SqrLpar/CrlLpar/Com token appears.
                    # If SqrLpar/CrlLpar/Com token does not appear or Lpar token comes first, then this implies
                    # that there is problem in the usage of Com token.
                    # One slight difference this time is that, we leave Com token in the temporary stack.
                    # They will be popped by SqrRpar/CrlRpar token later.
                    while self.__tmp_stk:
                        if self.__tmp_stk[-1].v in [Delimiter.Lpar, Delimiter.SqrLpar, Delimiter.CrlLpar,
                                                    Delimiter.Com]:
                            break

                        self.__postfix.append(self.__tmp_stk.pop())

                    if not self.__tmp_stk or self.__tmp_stk[-1].v == Delimiter.Lpar:
                        raise ParserError.InvalidExpr(21, self.__line, tok.pos)

                    self.__tmp_stk.append(tok)

                    continue
                elif tok.v == Unary.Trans:
                    # Postfix operator: already in postfix position, emit directly.
                    self.__postfix.append(tok)

                    continue
                else:
                    # OP token which it not Rpar/SqrRpar/CrlRpar/Com goes to temporary stack.
                    # Before push, it compares the precedence b/w the top token in the temporary stack and the
                    # token to be pushed. (If temporary stack is empty, it just pushes.)
                    # If the top token has higher inner precedence than outer precedence of token to be pushed,
                    # pop the temporary stack and add them to postfix list until the relation inverts.
                    # Since the precedence is deliberately designed, this simple procedure automatically
                    # reorders operations according to the precedence and association rule b/w operations.
                    if not self.__tmp_stk:
                        self.__tmp_stk.append(tok)

                        continue

                    if self.__tmp_stk[-1].precd_in > tok.precd_out:
                        while self.__tmp_stk and self.__tmp_stk[-1].precd_in > tok.precd_out:
                            self.__postfix.append(self.__tmp_stk.pop())

                    self.__tmp_stk.append(tok)

                    continue
            elif tok_t == Token.Fun:
                # Fun token goes directly to temporary stack.
                self.__tmp_stk.append(tok)

                continue
            else:
                # Num/Var/Str/Bool/Void tokens goes directly to the postfix list.
                self.__postfix.append(tok)

                continue

        # After all tokens being processed, pop all remaining tokens in temporary stack and add them to postfix
        # list.
        # Note that there must be no Lpar/SqrLpar/CrlLpar token.
        # The existence of these tokens implies parenthesis matching problem.
        while self.__tmp_stk:
            if self.__tmp_stk[-1].v in [Delimiter.Lpar, Delimiter.SqrLpar, Delimiter.CrlLpar]:
                raise ParserError.InvalidExpr(20, self.__line, self.__tmp_stk[-1].pos)

            self.__postfix.append(self.__tmp_stk.pop())
def __ast_gen(self) -> AST.AST:
"""
Generate AST from postfix expression.
For generation, it uses stack approach described in the references below.
This method is private and called internally as the third step of parsing chain.
For detailed description for parsing chain, refer to the comments of ``Parser.parse``.
**Reference**
* https://en.wikipedia.org/wiki/Binary_expression_tree
* https://www.geeksforgeeks.org/expression-tree
:return: Generated AST.
:rtype: AST.AST
"""
assert self.__postfix
self.__tmp_stk.clear()
for tok in self.__postfix:
tok_t: type = type(tok)
if tok_t == Token.Op:
# OP token pops ASTs in temporary stack and merge them into one AST rooted to itself.
# The # of ASTs to be popped depends on the type of operator.
# Note that the order of ASTs must be reversed.
# That is, the top AST in the temporary stack should be registered as the last children and so on.
for i in range(tok.argc, 0, -1):
tok.add_chd(self.__tmp_stk[-i])
self.__tmp_stk = self.__tmp_stk[:-tok.argc]
self.__tmp_stk.append(tok)
continue
elif tok_t in [Token.Fun, Token.List]:
# Fun/List token is dealt similarly as OP token.
if tok.argc == 0:
self.__tmp_stk.append(tok)
continue
else:
for i in range(tok.argc, 0, -1):
tok.add_chd(self.__tmp_stk[-i])
self.__tmp_stk = self.__tmp_stk[:-tok.argc]
self.__tmp_stk.append(tok)
continue
else:
# Num/Var/Str/Bool/Void token goes directly to temporary as single (intermediate) AST.
# They will be merged latter to form one final AST with will be returned.
self.__tmp_stk.append(tok)
continue
# After iteration, there must be one final AST.
assert len(self.__tmp_stk) == 1
return AST.AST(self.__tmp_stk.pop(), self.__line)
@classmethod
def inst(cls) -> Parser:
"""
Getter for singleton object.
If it is the first time calling this, it initializes the singleton objects.
This automatically supports so called lazy initialization.
:return: Singleton object.
:rtype: Parser
"""
if not cls.__inst:
cls.__inst = Parser()
return cls.__inst
    def parse(self, line: str, debug: bool = False) -> AST.AST:
        """
        Parse user input line and generate AST.
        Parsing consists of four steps.
        1. Initialize parser.
        At this step, it clears internal buffers.
        2. Run lexer.
        Lexer tokenizes input string and do lexical analysis to detect syntax errors.
        3. Convert infix to postfix expression.
        For generation of AST, it converts infix expression to postfix expression.
        Further, it checks whether parenthesis is balanced.
        If the input passes lexer and this converter with no error, one can ensure that there is no syntax error.
        4. Generate AST.
        Generate AST using postfix converted expression.
        This method supports brief summary outputs which can be used for debugging or generation of debug set.
        :param line: Original user input string to be parsed.
        :type line: str
        :param debug: Flag for debug mode. (Default: False)
        :type debug: bool
        :return: Generated AST.
        :rtype: AST.AST
        """
        if debug:
            # Debug mode: run the same four-step chain, but echo progress and every
            # intermediate result (token lists, final AST) to the debug buffer.
            from sys import getsizeof
            buf: Type.BufT = Type.BufT.DEBUG  # Debug buffer.
            self.__line = line
            # Print out parsing target.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('parsing target'), buf)
            Printer.Printer.inst().buf(f'@raw : {line}', buf, indent=2)
            Printer.Printer.inst().buf(f'@size: {len(line)} chars ({getsizeof(line)} bytes)', buf, indent=2)
            Printer.Printer.inst().buf_newline(buf)
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('parsing chain'), buf)
            # Initialize parser.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Initializing parser'), buf, False, 2)
            self.__init()
            Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
            Printer.Printer.inst().buf(f'@__infix : {len(self.__infix)} (cleared)', buf, indent=4)
            Printer.Printer.inst().buf(f'@__postfix: {len(self.__postfix)} (cleared)', buf, indent=4)
            Printer.Printer.inst().buf_newline(buf)
            # Run lexer.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running lexer'), buf, False, 2)
            try:
                self.__lexer()
            except Error.ParserErr as parser_err:
                # Mark the failing step in the log before propagating the error.
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                Printer.Printer.inst().buf_newline(buf)
                raise parser_err
            else:
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
                # Print out generated infix-ordered tokens.
                for i in range(len(self.__infix)):
                    Printer.Printer.inst().buf(f'[{i}] {type(self.__infix[i]).__name__.upper()}', buf, indent=4)
                    Printer.Printer.inst().buf(f'@val: {self.__infix[i].v_str()}', buf, indent=6)
                    Printer.Printer.inst().buf(f'@pos: {self.__infix[i].pos}', buf, indent=6)
                Printer.Printer.inst().buf_newline(buf)
            # Convert infix expression to postfix expression.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running infix to postfix converter'), buf, False,
                                       2)
            try:
                self.__infix_to_postfix()
            except Error.ParserErr as parser_err:
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                Printer.Printer.inst().buf_newline(buf)
                raise parser_err
            else:
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
                # Print out converted postfix-ordered tokens.
                for i in range(len(self.__postfix)):
                    Printer.Printer.inst().buf(f'[{i}] {type(self.__postfix[i]).__name__.upper()}', buf, indent=4)
                    Printer.Printer.inst().buf(f'@val: {self.__postfix[i].v_str()}', buf, indent=6)
                    Printer.Printer.inst().buf(f'@pos: {self.__postfix[i].pos}', buf, indent=6)
                Printer.Printer.inst().buf_newline(buf)
            # Generate AST.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running AST generator'), buf, False, 2)
            expr: AST.AST = self.__ast_gen()  # Generated AST
            Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
            Printer.Printer.inst().buf(f'@AST: {expr}', buf, indent=4)
            Printer.Printer.inst().buf_newline(buf)
            return expr
        else:
            # Non-debug mode: the same four steps, silently.
            self.__line = line
            self.__init()
            self.__lexer()
            self.__infix_to_postfix()
            return self.__ast_gen()
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,542 | eik4862/TinyCalculator | refs/heads/master | /Core/Token.py | from __future__ import annotations
from typing import List, final, Union, Any, Dict
from Core import AST, TypeSystem, TypeChecker
from Function import *
from Operator import *
class Tok:
    """
    Base class for every token kind.
    A token couples a raw value with the position it was read from; the ``t`` and
    ``t_var`` slots are left unset here and attached later through their setters.
    :ivar __v: Value of token. (Default: None)
    :ivar __pos: Position in the raw input string where token is derived. (Default: None)
    """
    def __init__(self, v: Union[int, float, complex, str, bool, Operator.Op, Function.Fun] = None,
                 pos: int = None) -> None:
        self.__v: Union[int, float, complex, str, bool, Operator.Op, Function.Fun] = v
        self.__pos: int = pos
        # Attached later via the ``t``/``t_var`` setters; unset until then.
        self.__t: TypeSystem.T = None
        self.__t_var: TypeChecker.TVar = None
    @property
    def v(self) -> Union[int, float, complex, str, bool, Operator.Op, Function.Fun]:
        """
        Token value. A void token's value is ``None``.
        :return: Token value.
        :rtype: Union[int, float, complex, str, bool, Operator.Op, Function.Fun]
        """
        return self.__v
    @property
    def pos(self) -> int:
        """
        Position in the raw input string this token was derived from.
        :return: The position where token is derived.
        :rtype: int
        """
        return self.__pos
    @property
    def t(self) -> TypeSystem.T:
        """
        Type attached to this token, or ``None`` when not yet assigned.
        """
        return self.__t
    @property
    def t_var(self) -> TypeChecker.TVar:
        """
        Type variable attached to this token, or ``None`` when not yet assigned.
        """
        return self.__t_var
    @v.setter
    def v(self, v: Union[int, float, complex, str, bool, Operator.Op, Function.Fun]) -> None:
        """
        Replace the token value.
        :param v: Token value to be set.
        :type v: Union[int, float, complex, str, bool, Operator.Op, Function.Fun]
        """
        self.__v = v
    @t.setter
    def t(self, t: TypeSystem.T) -> None:
        self.__t = t
    @t_var.setter
    def t_var(self, t_var: TypeChecker.TVar) -> None:
        self.__t_var = t_var
    def v_str(self) -> str:
        """
        Human-readable form of the value for debug printouts.
        """
        return str(self.__v)
@final
class Num(Tok):
    """
    Token wrapping a numeric literal (``int``, ``float`` or ``complex``).
    """
    def __init__(self, v: Union[int, float, complex], pos: int = None) -> None:
        super().__init__(v, pos)
@final
class Op(Tok):
    """
    Operator token class.
    Wraps an ``Operator.Op`` subclass and accumulates its operand sub-trees while
    the AST is built.
    :ivar __chd: List of children tokens.
    :ivar __argc: # of operands.
    """
    def __init__(self, v: Operator.Op, pos: int = None) -> None:
        super().__init__(v, pos)
        self.__chd: List[Tok] = []
        # The operand count is dictated by the operator class itself.
        self.__argc: int = v.argc()
    @property
    def precd_in(self) -> int:
        """
        Getter for inner precedence.
        :return: Inner precedence.
        :rtype: int
        """
        return super().v.precd_in()
    @property
    def precd_out(self) -> int:
        """
        Getter for outer precedence.
        :return: Outer precedence.
        :rtype: int
        """
        return super().v.precd_out()
    @property
    def chd(self) -> List[Tok]:
        """
        Getter for child list.
        :return: Child list.
        :rtype: List[Tok]
        """
        return self.__chd
    @property
    def argc(self) -> int:
        """
        Getter for operand #.
        :return: Operand #.
        :rtype: int
        """
        return self.__argc
    @Tok.v.setter
    def v(self, v: Operator.Op) -> None:
        """
        Setter for the wrapped operator.
        Overrides the inherited setter (reusing ``Tok``'s getter through the
        descriptor) so the operand count stays in sync with the new operator.
        :param v: Operator to be set.
        :type v: Operator.Op
        """
        Tok.v.fset(self, v)
        self.__argc: int = v.argc()
    @chd.setter
    def chd(self, chd: List[Tok]) -> None:
        """
        Setter for child list.
        :param chd: Child list to be set.
        :type: List[Token]
        """
        self.__chd = chd
    @argc.setter
    def argc(self, argc: int) -> None:
        """
        Setter for operand #.
        :param argc: The # of operand to be set.
        :type argc: int
        """
        self.__argc = argc
    def v_str(self) -> str:
        """
        Return the operator class name in upper case for debug output.
        :return: Upper-cased operator name.
        :rtype: str
        """
        return super().v.__name__.upper()
    def add_chd(self, tok: Tok) -> None:
        """
        Append child to the child list.
        :param tok: Child to be appended.
        :type tok: Tok
        """
        self.__chd.append(tok)
    def swap_chd(self, tok: Tok, idx: int) -> None:
        """
        Replace child at specific position in child list.
        :param tok: New token to be replaced with.
        :type tok: Tok
        :param idx: Position in child list to be replaced.
        :type idx: int
        """
        self.__chd[idx] = tok
    def del_chd(self, idx: int) -> None:
        """
        Delete child at specific position in child list.
        :param idx: Position in child list to be deleted.
        :type idx: int
        """
        del self.__chd[idx]
@final
class Var(Tok):
    """
    Variable token class.
    The stored value is the variable's integer id; ``v_str`` resolves it to a
    display name through ``AST.AST.var_name``.
    """
    # NOTE: the former no-op ``__del__(self): pass`` was removed. A user-defined
    # finalizer adds per-instance GC bookkeeping and can delay cycle collection
    # while doing nothing here; dropping it is behavior-compatible.
    def __init__(self, v: int, pos: int = None) -> None:
        super().__init__(v, pos)
    def v_str(self) -> str:
        """
        Return the human-readable variable name for debug output.
        :return: Variable name.
        :rtype: str
        """
        return AST.AST.var_name(super().v)
@final
class Fun(Tok):
    """
    Function token class.
    Wraps a ``Function.Fun`` subclass and accumulates its argument sub-trees while
    the AST is built.
    :ivar __chd: Child list.
    :ivar __argc: # of arguments.
    """
    def __init__(self, v: Function.Fun, pos: int = None) -> None:
        super().__init__(v, pos)
        self.__chd: List[Tok] = []
        # Starts at 0; presumably incremented via the ``argc`` setter while the
        # parser counts the actual argument list — confirm against Parser.
        self.__argc: int = 0
    @property
    def precd_in(self) -> int:
        """
        Getter for inner precedence.
        :return: Inner precedence.
        :rtype: int
        """
        return super().v.precd_in()
    @property
    def precd_out(self) -> int:
        """
        Getter for outer precedence.
        :return: Outer precedence.
        :rtype: int
        """
        return super().v.precd_out()
    @property
    def argc(self) -> int:
        """
        Getter for argument #.
        :return: Argument #.
        :rtype: int
        """
        return self.__argc
    @property
    def chd(self) -> List[Tok]:
        """
        Getter for child list.
        :return: Child list.
        :rtype: List[Tok]
        """
        return self.__chd
    def v_str(self) -> str:
        """
        Return the function class name (original casing) for debug output.
        :return: Function name.
        :rtype: str
        """
        return super().v.__name__
    @argc.setter
    def argc(self, argc: int) -> None:
        """
        Setter for argument #.
        :param argc: The # of arguments to be set.
        :type argc: int
        """
        self.__argc = argc
    @chd.setter
    def chd(self, chd: List[Tok]) -> None:
        """
        Setter for child list.
        :param chd: Child list to be set.
        :type: List[Token]
        """
        self.__chd = chd
    def add_chd(self, tok: Tok) -> None:
        """
        Append child to the child list.
        :param tok: Child to be appended.
        :type tok: Tok
        """
        self.__chd.append(tok)
    def swap_chd(self, tok: Tok, idx: int) -> None:
        """
        Set child at specific position in child list.
        :param tok: Token to be set as children.
        :type tok: Tok
        :param idx: Position in child list to be set.
        :type idx: int
        """
        self.__chd[idx] = tok
@final
class Str(Tok):
    """
    String token class.
    ``v_str`` echoes the value as a quoted source-style literal, re-escaping
    backslash, newline, tab and double quote.
    """
    # Translation table mapping each escapable character to its escaped spelling.
    # A single C-level ``str.translate`` pass replaces the former per-character
    # ``+=`` loop (which was effectively quadratic on long strings).
    __ESC = str.maketrans({'\\': '\\\\', '\n': '\\n', '\t': '\\t', '"': '\\"'})
    def __init__(self, v: str, pos: int = None) -> None:
        super().__init__(v, pos)
    def v_str(self) -> str:
        """
        Return the value quoted and escaped as it would appear in source.
        :return: Quoted, escaped string.
        :rtype: str
        """
        return '"' + super().v.translate(self.__ESC) + '"'
@final
class Bool(Tok):
    """
    Token wrapping a boolean literal.
    """
    def __init__(self, v: bool, pos: int = None) -> None:
        super().__init__(v, pos)
@final
class List(Tok):
    """
    Token representing a list literal; children are the item sub-trees.
    NOTE(review): the class name shadows ``typing.List`` inside this class body.
    It is harmless at runtime because the file uses postponed annotation
    evaluation (``from __future__ import annotations``), but renaming would be cleaner.
    :ivar __chd: Child list.
    :ivar __argc: # of items.
    """
    def __init__(self, pos: int = None, argc: int = 0) -> None:
        super().__init__(pos=pos)
        self.__chd: List[Tok] = []
        self.__argc: int = argc
    @property
    def argc(self) -> int:
        """
        Getter for the # of items.
        :return: # of items.
        :rtype: int
        """
        return self.__argc
    @property
    def chd(self) -> List[Tok]:
        """
        Getter for child list.
        :return: Child list.
        :rtype: List[Tok]
        """
        return self.__chd
    def add_chd(self, tok: Tok) -> None:
        """
        Append one child token to the child list.
        :param tok: Child to be appended.
        :type tok: Tok
        """
        self.__chd.append(tok)
@final
class Void(Tok):
    """
    Token for the absence of a value; its value is ``None`` and it prints as an
    empty string.
    """
    def __init__(self) -> None:
        super().__init__()
    def v_str(self) -> str:
        return ''
@final
class Ter(Tok):
    """
    Terminal token class.
    Used only transiently to check the terminal condition of an expression.
    """
    def __init__(self) -> None:
        super().__init__()
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,543 | eik4862/TinyCalculator | refs/heads/master | /Error/ParserError.py | from typing import final, Tuple
from Error import Error
from Operator import *
@final
class EmptyExpr(Error.ParserErr):
    """
    Parser error carrying only an error number (raised for an empty expression,
    per the class name).
    """
    def __init__(self, errno: int) -> None:
        super().__init__(errno)
@final
class InvalidTok(Error.ParserErr):
    """
    Parser error for an invalid token; carries the source line and the position
    of the offending token so the reporter can point at it.
    """
    def __init__(self, errno: int, line: str, pos: int) -> None:
        super().__init__(errno)
        self.__line: str = line
        self.__pos: int = pos
    @property
    def line(self) -> str:
        """
        Getter for the raw input line that triggered the error.
        """
        return self.__line
    @property
    def pos(self) -> int:
        """
        Getter for the position of the offending token.
        """
        return self.__pos
@final
class InvalidExpr(Error.ParserErr):
    """
    Parser error for an invalid expression; carries the source line, the error
    position, and optionally the pair of operators that clashed.
    """
    def __init__(self, errno: int, line: str, pos: int, err_op: Tuple[Operator.Op, Operator.Op] = None) -> None:
        super().__init__(errno)
        self.__line: str = line
        self.__pos: int = pos
        self.__err_op: Tuple[Operator.Op, Operator.Op] = err_op
    @property
    def line(self) -> str:
        """
        Getter for the raw input line that triggered the error.
        """
        return self.__line
    @property
    def pos(self) -> int:
        """
        Getter for the error position.
        """
        return self.__pos
    @property
    def err_op(self) -> Tuple[Operator.Op, Operator.Op]:
        """
        Getter for pair of erroneous operators.
        :return: Pair of erroneous operators, or ``None`` when not applicable.
        :rtype: Tuple[Operator.Op, Operator.Op]
        """
        return self.__err_op
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,544 | eik4862/TinyCalculator | refs/heads/master | /Function/__init__.py | __all__ = ['Function', 'Integer', 'Division', 'Combination', 'Trigonometric', 'Hyperbolic', 'Exponential', 'Gamma',
'General', 'Link', 'Signal']
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,545 | eik4862/TinyCalculator | refs/heads/master | /Function/General.py | from typing import final
from Function import Function
class GenFun(Function.Fun):
    """
    Base class for generalized-function tokens.
    Instantiation is forbidden: ``__new__`` always raises, so subclasses act as
    pure namespaces/markers.
    """
    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class DiracDelta(GenFun):
    """
    Dirac delta marker class — not instantiable; no implementation yet.
    """
    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class HeavisideTheta(GenFun):
    """
    Heaviside theta marker class — not instantiable; no implementation yet.
    """
    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class DiracComb(GenFun):
    """
    Dirac comb marker class — not instantiable; no implementation yet.
    """
    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class HeavisidePi(GenFun):
    """
    Heaviside pi marker class — not instantiable; no implementation yet.
    """
    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
@final
class HeavisideLambda(GenFun):
    """
    Heaviside lambda marker class — not instantiable; no implementation yet.
    """
    def __new__(cls, *args, **kwargs) -> None:
        raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,546 | eik4862/TinyCalculator | refs/heads/master | /Test/TestManager.py | import os
import statistics
import time
from decimal import Decimal
from typing import List, final, TextIO, Final, Tuple
from Core import Type, DB
from Function import Trigonometric, Hyperbolic, SpecialFunction, Exponential
from Util import Printer
@final
class TestManager:
    """
    Run tests for numerical analysis.
    Test inputs and reference outputs are generated by MATLAB using arbitrary precision arithmetic.
    By default, the precision of inputs and outputs are 100 which is quite high.
    Test results are also processed and analyzed by MATLAB using arbitrary precision arithmetic.
    For detailed code and explanation, refer to comments in ``test_gen.m`` and ``plt_err.m``.
    For the concept of arbitrary precision arithmetic, consult the references below.
    This class is implemented as singleton.
    For the concept of singleton pattern, consult the references below.
    **Reference**
    * https://en.wikipedia.org/wiki/Arbitrary-precision_arithmetic
    * https://en.wikipedia.org/wiki/Singleton_pattern
    :cvar __MAX_PRINTOUT: Maximum # of lines of test results to be printed out.
    :cvar __inst: Singleton object.
    """
    __MAX_PRINTOUT: Final[int] = 10
    __inst = None
    @classmethod
    def inst(cls):
        """
        Getter for singleton object.
        If it is the first time calling this, it initializes the singleton objects.
        This automatically supports so called lazy initialization.
        :return: Singleton object.
        :rtype: TestManager
        """
        if not cls.__inst:
            cls.__inst = TestManager()
        return cls.__inst
    def test(self, fun: Type.FunT = None, verb: bool = False) -> None:
        """
        Run test, report with supplementary statistics and save the result.
        One should load test DB before calling this function.
        Otherwise, it will fail in unexpected way.
        The result will be stored as a text file as ``../Test/Test.out`` wrt. current working directory.
        :param fun: Function to be tested. When None, every target registered in the test DB is run.
        :type fun: Type.FunT
        :param verb: Flag for verbose mode. (Default: False)
        :type verb: bool
        """
        buf: Type.BufT = Type.BufT.DEBUG  # Debug buffer.
        if fun:
            # Single-function mode: fetch inputs/references, run, dump output, report stats.
            test_in: List[Decimal] = DB.DB.inst().get_test_in()
            test_ref: List[Decimal] = DB.DB.inst().get_test_ref()
            argc: int = int(len(test_in) / len(test_ref))
            # Regroup the flat input list into one row of ``argc`` arguments per case.
            test_in: List[List[Decimal]] = [[test_in[argc * i + j] for j in range(argc)] for i in
                                            range(int(len(test_in) / argc))]
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('test info'), buf)
            Printer.Printer.inst().buf(f'@target : {fun.name.capitalize()}', buf, indent=2)
            Printer.Printer.inst().buf(f'@size   : {len(test_ref)}', buf, indent=2)
            Printer.Printer.inst().buf(f'@argc   : {argc}', buf, indent=2)
            Printer.Printer.inst().buf_newline(buf)
            # Start test.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('start test'), buf)
            Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running test'), buf, False, 2)
            start: float = time.process_time()  # Start time stamp for elapsed time measure.
            # Dispatch to the module that implements the target function family.
            if fun in [Type.FunT.Sin, Type.FunT.Cos, Type.FunT.Tan, Type.FunT.Csc, Type.FunT.Sec, Type.FunT.Cot,
                       Type.FunT.ArcSin, Type.FunT.ArcCos, Type.FunT.ArcTan, Type.FunT.ArcCsc, Type.FunT.ArcSec, Type.FunT.ArcCot]:
                test_out: List[Decimal] = Trigonometric.TriFun.test(fun, test_in)  # Test output.
            elif fun in [Type.FunT.Sinh, Type.FunT.Cosh, Type.FunT.Tanh, Type.FunT.ArcSinh, Type.FunT.ArcCosh,
                         Type.FunT.ArcTanh]:
                test_out: List[Decimal] = Hyperbolic.HypbolicFunc.test(fun, test_in)  # Test output.
            elif fun in [Type.FunT.Gamma, Type.FunT.LogGamma, Type.FunT.Erf, Type.FunT.Erfc, Type.FunT.Beta]:
                test_out: List[Decimal] = SpecialFunction.SpecialFun.test(fun, test_in)  # Test output.
            else:
                test_out: List[Decimal] = Exponential.ExpFun.test(fun, test_in)  # Test output.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
            # Write test output.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Writing test results'), buf, False, 2)
            if not os.path.isdir('../Test/Out'):
                os.mkdir('../Test/Out')
            try:
                out: TextIO = open('../Test/Out/Test.out', 'w')  # File to write output.
            except OSError as os_err:
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                raise os_err
            for n in test_out:
                Printer.Printer.inst().buf(f'{n:.100e}', Type.BufT.INTERNAL, True)
            Printer.Printer.inst().print(Type.BufT.INTERNAL, out)
            try:
                out.close()
            except OSError as os_err:
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                raise os_err
            Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
            Printer.Printer.inst().buf_newline(buf)
            elapsed: float = time.process_time() - start  # Elapsed time.
            sz: int = os.path.getsize('../Test/Out/Test.out')  # Test output file size.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('test finished'), buf)
            Printer.Printer.inst().buf(f'@out    :../Test/Out/Test.out ({sz}bytes)', buf, indent=2)
            Printer.Printer.inst().buf(f'@elapsed: {elapsed * 1000:.2f}ms', buf, indent=2)
            Printer.Printer.inst().buf_newline(buf)
            # Report.
            test_err: List[Decimal] = list(map(lambda x, y: abs(x - y), test_ref, test_out))  # Test abs error.
            quant: List[Decimal] = statistics.quantiles(test_err)  # Quantiles.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('test report', 133), buf)
            Printer.Printer.inst().buf(f'@mean     : {statistics.fmean(test_err):.20e}', buf, indent=2)
            Printer.Printer.inst().buf(f'@sd       : {statistics.stdev(test_err):.20e}', buf, indent=2)
            Printer.Printer.inst().buf(f'@min      : {min(test_err):.20e}', buf, indent=2)
            Printer.Printer.inst().buf(f'@1st quant: {quant[0]:.20e}', buf, indent=2)
            Printer.Printer.inst().buf(f'@mode     : {quant[1]:.20e}', buf, indent=2)
            Printer.Printer.inst().buf(f'@3rd quant: {quant[2]:.20e}', buf, indent=2)
            Printer.Printer.inst().buf(f'@max      : {max(test_err):.20e}', buf, indent=2)
            Printer.Printer.inst().buf('@brief    :', buf, indent=2)
            Printer.Printer.inst().buf(
                Printer.Printer.inst().f_tb(['INPUT', 'REFERENCE', 'OUTPUT', 'ABSOLUTE ERROR'], 30), buf, indent=7)
            Printer.Printer.inst().buf(Printer.Printer.inst().f_hline(30, 4), buf, indent=7)
            if verb:
                for i in range(len(test_in)):
                    Printer.Printer.inst().buf(f'[{i:04d}] {test_in[i][0]:30.20e} {test_ref[i]:30.20e} '
                                               f'{test_out[i]:30.20e} {test_err[i]:30.20e}', buf)
                    for j in range(argc - 1):
                        Printer.Printer.inst().buf(f'{test_in[i][j + 1]:30.20e}', buf, indent=7)
            else:
                for i in range(len(test_in)):
                    # Fixed off-by-one: ``i > __MAX_PRINTOUT`` printed 11 lines when the
                    # cap is 10 and made the "more lines" count below wrong by one.
                    if i >= self.__MAX_PRINTOUT:
                        Printer.Printer.inst().buf(Printer.Printer.inst().f_tb(['...', '...', '...', '...'], 30), buf,
                                                   indent=7)
                        Printer.Printer.inst().buf(f'Followed by {len(test_out) - self.__MAX_PRINTOUT} more lines.',
                                                   buf, indent=2)
                        break
                    Printer.Printer.inst().buf(
                        f'[{i:04d}] {test_in[i][0]:30.20e} {test_ref[i]:30.20e} {test_out[i]:30.20e} '
                        f'{test_err[i]:30.20e}', buf)
                    for j in range(argc - 1):
                        Printer.Printer.inst().buf(f'{test_in[i][j + 1]:30.20e}', buf, indent=7)
            Printer.Printer.inst().buf_newline(buf)
        else:
            # Batch mode: run every (function, size) target registered in the test DB.
            target: List[Tuple[Type.FunT, Type.TestSzT]] = DB.DB.inst().get_test_target()
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('test info'), buf)
            Printer.Printer.inst().buf(f'@size   : {len(target)}', buf, indent=2)
            Printer.Printer.inst().buf(f'@target :', buf, indent=2)
            for i in range(len(target)):
                Printer.Printer.inst().buf(
                    f'[{i}] {target[i][0].name.capitalize()} ({target[i][1].name.lower()} input)', buf, indent=4)
            Printer.Printer.inst().buf_newline(buf)
            # Start test.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('start test'), buf)
            test_out: List[List[Decimal]] = []
            out_path: List[str] = []
            start: float = time.process_time()  # Start time stamp for elapsed time measure.
            for i in range(len(target)):
                test_in: List[Decimal] = DB.DB.inst().get_test_in(*target[i])
                test_ref: List[Decimal] = DB.DB.inst().get_test_ref(*target[i])
                argc: int = int(len(test_in) / len(test_ref))
                test_in: List[List[Decimal]] = [[test_in[argc * i + j] for j in range(argc)] for i in
                                                range(int(len(test_in) / argc))]
                Printer.Printer.inst().buf(Printer.Printer.inst().f_prog(f'[{i}] Running test #{i}'), buf, False, 2)
                if target[i][0] in [Type.FunT.Sin, Type.FunT.Cos, Type.FunT.Tan, Type.FunT.Csc, Type.FunT.Sec,
                                    Type.FunT.Cot, Type.FunT.ArcSin, Type.FunT.ArcCos, Type.FunT.ArcTan, Type.FunT.ArcCsc,
                                    Type.FunT.ArcSec, Type.FunT.ArcCot]:
                    test_out.append(Trigonometric.TriFun.test(target[i][0], test_in))
                elif target[i][0] in [Type.FunT.Sinh, Type.FunT.Cosh, Type.FunT.Tanh, Type.FunT.Csch, Type.FunT.Sech,
                                      Type.FunT.Coth, Type.FunT.ArcSinh, Type.FunT.ArcCosh, Type.FunT.ArcTanh,
                                      Type.FunT.ArcCsch, Type.FunT.ArcSech, Type.FunT.ArcCoth]:
                    test_out.append(Hyperbolic.HypbolicFunc.test(target[i][0], test_in))
                elif target[i][0] in [Type.FunT.Exp, Type.FunT.Log, Type.FunT.Pow, Type.FunT.Sqrt, Type.FunT.Log2,
                                      Type.FunT.Log10]:
                    test_out.append(Exponential.ExpFun.test(target[i][0], test_in))
                else:
                    test_out.append(SpecialFunction.SpecialFun.test(target[i][0], test_in))
                Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
            if not os.path.isdir('../Test/Out'):
                os.mkdir('../Test/Out')
            for i in range(len(target)):
                out_path.append(
                    f'../Test/Out/Test_{target[i][0].name.capitalize()}_{target[i][1].name.capitalize()}.out')
                try:
                    out: TextIO = open(out_path[-1], 'w')
                except OSError as os_err:
                    Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                    raise os_err
                for n in test_out[i]:
                    Printer.Printer.inst().buf(f'{n:.100e}', Type.BufT.INTERNAL, True)
                Printer.Printer.inst().print(Type.BufT.INTERNAL, out)
                try:
                    out.close()
                except OSError as os_err:
                    Printer.Printer.inst().buf(Printer.Printer.inst().f_col('fail', Type.Col.RED), buf)
                    raise os_err
            Printer.Printer.inst().buf_newline(buf)
            elapsed: float = time.process_time() - start  # Elapsed time.
            sz: List[int] = [os.path.getsize(file) for file in out_path]  # Test output file size.
            Printer.Printer.inst().buf(Printer.Printer.inst().f_title('test finished'), buf)
            Printer.Printer.inst().buf('@out    :', buf, indent=2)
            for i in range(len(out_path)):
                Printer.Printer.inst().buf(f'[{i}] {out_path[i]} ({sz[i]}bytes)', buf, indent=4)
            Printer.Printer.inst().buf(f'@elapsed: {elapsed * 1000:.2f}ms', buf, indent=2)
            Printer.Printer.inst().buf_newline(buf)
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,547 | eik4862/TinyCalculator | refs/heads/master | /Warning/ParserWarning.py | from Warning import Warning
class NumWarn(Warning.ParserWarn):
    """
    Parser warning raised for numeric tokens, carrying the position
    in the input string where the warning arose.
    """

    def __init__(self, warnno: int, pos: int) -> None:
        """
        Initializer.

        :param warnno: Warning code, forwarded to the base parser warning.
        :param pos: Index in the raw input at which the warning occurred.
        """
        super().__init__(warnno)
        self.__pos: int = pos

    @property
    def pos(self) -> int:
        """Index in the raw input at which the warning occurred."""
        return self.__pos
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,548 | eik4862/TinyCalculator | refs/heads/master | /Operator/Operator.py | from __future__ import annotations
from typing import List, Tuple
class Op:
"""
Operator toolbox.
Precedence and association rules.
1. Precedence basically inherits that of C.
ADD/SUB < MUL/DIV/REM < POW < PLUS/MINUS < FACT.
2. Association rule also inherits that of C.
ADD/SUB/MUL/DIV/REM: Left to right.
POW/FACT/PLUS/MINUS: Right to left.
:cvar __idx_l: Temporary index list for simplification.
"""
__ARGC: int = None
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def argc(cls) -> int:
return cls.__ARGC
#
# @classmethod
# def pck(cls, rt: Token.Op, prn: Token.Tok) -> Tuple[Token.Tok, List[Warning.InterpWarn]]:
# """
# Simplifier for operators.
#
# It does following simplifications.
# 1. Constant folding.
# Expressions like ``2 + 3`` or ``2 * 3`` can be reduced to ``5`` or ``6``, resp
# Thus complex expression may be folded into simple one NUM token.
# 2. Sign propagation.
# In expression like ``(-x) * y`` or ``(-x) / y``, the minus sign can be propagated to outside as
# ``-(x * y)`` or ``-(x / y)``, resp.
# Although the # of operation does not reduce, minus sign can be disappear later by dead expression
# stripping.
# 3. Dead expression stripping.
# In expression like ``x * 1`` or ``-(-x)``, multiplication by 1 or two contiguous minus sign is useless.
# Thus both can be reduced to ``x`` which is much simpler.
# Note that this with sign propagation can eliminate many sign inversions.
# 4. Hoisting.
# Originally generated AST is binary tree.
# But from the associativity of ADD and MUL and equivalence relation of operators, ``x / y = x * (y^-1)``
# and ``x - y = x + (-y)``, it can be reformed to general tree with multiple children, reducing the height
# of tree.
# 5. Packing.
# In expression like ``x + -y + -z`` or ``x * (y^-1) * (z^-1)``, if can be packed to ``x + -(y + z)`` or
# ``x * (y * z)^-1``, reducing the # of operations.
# From hoisting and unpacking, sign inversion and power of -1 may be frequently appear in an expression.
# But this packing optimization can reduce those.
# 6. Unpacking.
# In expression like ``-(x + y)`` or ``-(x * y)``, it can be rewritten as ``(-x) + (-y)`` or
# ``x * y * -1``.
# At first glance, this might seem making expression more complex, not simplifying it.
# But with hoisting and sign propagation, this can be make the expression much simpler later.
# Also, packing will unwind this later, even if hoisting and sign propagation does not simplify these
# unwound minus signs.
# Most of these simplification tricks are originally part of compiler optimization and programing language scheme.
# For details, consult following references.
#
# **Reference**
# * https://en.wikipedia.org/wiki/Constant_folding
# * https://en.wikipedia.org/wiki/Dead_code_elimination
# * https://developer.mozilla.org/ko/docs/Glossary/Hoisting
#
# :param rt: Root of partial AST to be simplified.
# :type rt: Token.Op
# :param prn: Parent of root to be simplified.
# :type prn: Token.Tok
#
# :return: Root of simplified partial AST and list of generated warnings.
# :rtype: Tuple[Token.Tok, List[Warning.InterpWarn]]
#
# :raise NAN_DETECT: If nan is detected as one of operand.
# :raise INF_DETECT: If inf is detected as one of operand.
# :raise DOMAIN_OUT: If given operand is not in domain.
# :raise POLE_DETECT: If mathematical pole is detected.
# :raise BIG_INT: If given operand exceeds floating point max.
# :raise SMALL_INT: If given operand exceeds floating point min.
# """
# sgn: bool = False # Flag for sign propagation. True means there is sign propagation.
# tmp: Token.Tok = None # Temporary variable holding token.
# warn: List[Warning.InterpWarn] = [] # List of generated warnings.
#
# if rt.v == Type.OpT.ADD:
# # Hoisting.
# # For hoisting, it uses following rules.
# # 1. (a + ... + z) + (A + ... + Z) = a + ... + z + A + ... + Z
# # 2. (a + ... + z) + A = a + ... + z + A
# # 3. a + (A + ... + Z) = a + A + ... + Z
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.ADD:
# if rt.chd[1].v == Type.OpT.ADD:
# rt.chd = rt.chd[0].chd + rt.chd[1].chd
# else:
# rt.chd = rt.chd[0].chd + rt.chd[1:]
# elif rt.chd[1].v == Type.OpT.ADD:
# rt.chd = [rt.chd[0]] + rt.chd[1].chd
#
# # If there is parent which can handle hoisted children, let it handle.
# if prn and prn.v in [Type.OpT.ADD, Type.OpT.SUB]:
# return rt, warn
#
# cls.__idx_l.clear()
#
# for i in range(len(rt.chd)):
# if rt.chd[i].tok_t == Type.TokT.NUM:
# cls.__idx_l.append(i)
#
# # If there are no numeric children, we are done.
# # Do packing and return.
# # For packing, it uses following rules.
# # 1. x + (-y) + (-z) = x + -(y + z)
# # 2. (-x) + (-y) = -(x + y)
# # The following logic is an implementation of these rules.
# if not cls.__idx_l:
# for i in range(len(rt.chd)):
# if rt.chd[i].v == Type.OpT.MINUS:
# cls.__idx_l.append(i)
#
# if 1 < len(cls.__idx_l) < len(rt.chd):
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(Token.Op(Type.OpT.ADD))
#
# for idx in reversed(cls.__idx_l):
# tmp.chd[0].add_chd(rt.chd[idx].chd[0])
# rt.del_chd(idx)
#
# rt.add_chd(tmp)
#
# return rt, warn
# elif len(cls.__idx_l) == len(rt.chd):
# for i in range(len(rt.chd)):
# rt.swap_chd(rt.chd[i].chd[0], i)
#
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# else:
# return rt, warn
#
# # Check for warnings.
# # Addition generates warning for followings cases.
# # 1. Any operand exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. Any operand is nan. (NAN_DETECT)
# # 3. Any operand is inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# for idx in cls.__idx_l:
# if is_bigint(rt.chd[idx].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=idx + 1, handle='addition'))
# rt.chd[idx].v = math.inf
# elif is_smallint(rt.chd[idx].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 19, arg_pos=idx + 1, handle='addition'))
# rt.chd[idx].v = -math.inf
# elif math.isnan(rt.chd[idx].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=idx + 1, handle='addition'))
# elif math.isinf(rt.chd[idx].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=idx + 1, handle='addition'))
#
# # Constant folding.
# # Addition (b/w two operand left and right) has following rules.
# # 1. If any of left or right is nan, the result is nan.
# # 2. If left is +-inf and right is +-inf, the result is +-inf, resp.
# # 3. If left is +-inf and right is -+inf, the result is nan.
# # 5. If left is +-inf and right is finite, the result is +-inf, resp.
# # 7. If both left and right is finite, the result is left + right.
# # The following logic is an implementation of these rules.
# for idx in reversed(cls.__idx_l[1:]):
# rt.chd[cls.__idx_l[0]].v += rt.chd[idx].v
# rt.del_chd(idx)
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rule.
# # 1. x + 0 = x
# # 2. x + nan = nan
# # The following logic is an implementation of this rule.
# if len(rt.chd) > 1:
# if rt.chd[cls.__idx_l[0]].v == 0:
# rt.del_chd(cls.__idx_l[0])
# elif math.isnan(rt.chd[cls.__idx_l[0]].v):
# return rt.chd[cls.__idx_l[0]], warn
#
# # Packing.
# # For packing, it uses following rules.
# # 1. x + (-y) + (-z) = x + -(y + z)
# # 2. -x + n = -(x + -n)
# # 2. (-x) + (-y) = -(x + y)
# # 3. x + Nil = x
# # The following logic is an implementation of these rules.
# cls.__idx_l.clear()
#
# if len(rt.chd) == 1:
# return rt.chd[0], warn
#
# for i in range(len(rt.chd)):
# if rt.chd[i].v == Type.OpT.MINUS or rt.chd[i].tok_t == Type.TokT.NUM:
# cls.__idx_l.append(i)
#
# if 1 < len(cls.__idx_l) < len(rt.chd):
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(Token.Op(Type.OpT.ADD))
#
# for idx in reversed(cls.__idx_l):
# if rt.chd[idx].tok_t == Type.TokT.NUM:
# rt.chd[idx].v *= -1
# tmp.chd[0].add_chd(rt.chd[idx])
# else:
# tmp.chd[0].add_chd(rt.chd[idx].chd[0])
#
# rt.del_chd(idx)
#
# rt.add_chd(tmp)
#
# return rt, warn
# elif len(cls.__idx_l) == len(rt.chd):
# for i in range(len(rt.chd)):
# if rt.chd[i].tok_t == Type.TokT.NUM:
# rt.chd[i].v *= -1
# else:
# rt.swap_chd(rt.chd[i].chd[0], i)
#
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# else:
# return rt, warn
# if rt.v == Type.OpT.SUB:
# # Hoisting.
# # For hoisting, it uses following rules with simple optimization -(-x) = x.
# # 1. (a + ... + z) - (A + ... + Z) = a + ... + z + (-A) + ... + (-Z)
# # 2. (a + ... + z) - A = a + ... + z + (-A)
# # 3. a - (A + ... + Z) = a + (-A) + ... + (-Z)
# # 4. a - A = a + (-A)
# # The following logic is an implementation of these rules.
# rt.v = Type.OpT.ADD
#
# if rt.chd[0].v == Type.OpT.ADD:
# if rt.chd[1].v == Type.OpT.ADD:
# for i in range(len(rt.chd[1].chd)):
# if rt.chd[1].chd[i].v == Type.OpT.MINUS:
# rt.chd[1].swap_chd(
# rt.chd[1].chd[i].chd[0], i)
# elif rt.chd[1].chd[i].tok_t == Type.TokT.NUM:
# rt.chd[1].chd[i].v *= -1
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[1].chd[i])
# rt.chd[1].swap_chd(tmp, i)
#
# rt.chd = rt.chd[0].chd + rt.chd[1].chd
# else:
# if rt.chd[1].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[1].chd[0], 1)
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# rt.chd[1].v *= -1
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[1])
# rt.chd = rt.chd[0].chd
# rt.add_chd(tmp)
# elif rt.chd[1].v == Type.OpT.ADD:
# for i in range(len(rt.chd[1].chd)):
# if rt.chd[1].chd[i].v == Type.OpT.MINUS:
# rt.chd[1].swap_chd(
# rt.chd[1].chd[i].chd[0], i)
# elif rt.chd[1].chd[i].tok_t == Type.TokT.NUM:
# rt.chd[1].chd[i].v *= -1
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[1].chd[i])
# rt.chd[1].swap_chd(tmp, i)
#
# rt.chd = rt.chd[:-1] + rt.chd[1].chd
# else:
# if rt.chd[1].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[1].chd[0], 1)
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# rt.chd[1].v *= -1
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[1])
# rt.swap_chd(tmp, 1)
#
# # If there is parent which can handle hoisted children, let it handle.
# # Otherwise, call simplify method for addition to handle.
# if prn and prn.v in [Type.OpT.ADD, Type.OpT.SUB]:
# return rt, warn
# else:
# return cls.pck(rt, prn)
# elif rt.v == Type.OpT.MUL:
# # Hoisting.
# # For hoisting, it uses following rules.
# # 1. (a * ... * z) * (A * ... * Z) = a * ... * z * A * ... * Z
# # 2. (a * ... * z) * A = a * ... * z * A
# # 3. a * (A * ... * Z) = a * A * ... * Z
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MUL:
# if rt.chd[1].v == Type.OpT.MUL:
# rt.chd = rt.chd[0].chd + rt.chd[1].chd
# else:
# rt.chd = rt.chd[0].chd + rt.chd[1:]
# elif rt.chd[1].v == Type.OpT.MUL:
# rt.chd = [rt.chd[0]] + rt.chd[1].chd
#
# # If there is parent which can handle hoisted children, let it handle.
# if prn and prn.v == Type.OpT.MUL:
# return rt, warn
#
# cls.__idx_l.clear()
#
# # Sign propagation.
# # If the # of MINUS is odd, the sign can be propagated.
# for i in range(len(rt.chd)):
# if rt.chd[i].v == Type.OpT.MINUS:
# sgn = not sgn
# rt.swap_chd(rt.chd[i].chd[0], i)
# elif rt.chd[i].tok_t == Type.TokT.NUM:
# cls.__idx_l.append(i)
#
# # Check for warnings.
# # Multiplication generates warning for followings cases.
# # 1. Any operand exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. Any operand is nan. (NAN_DETECT)
# # 3. Any operand is inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# for idx in cls.__idx_l:
# if is_bigint(rt.chd[idx].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=idx + 1, handle='multiplication'))
# rt.chd[idx].v = math.inf
# elif is_smallint(rt.chd[idx].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 19, arg_pos=idx + 1, handle='multiplication'))
# rt.chd[idx].v = -math.inf
# elif math.isnan(rt.chd[idx].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=idx + 1, handle='multiplication'))
# elif math.isinf(rt.chd[idx].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=idx + 1, handle='multiplication'))
#
# # If there are no numeric children, we are done.
# # Do packing and return.
# # For packing, it uses following rules with consideration of sign propagation.
# # 1. x * (y^-1) * (z^-1) = x * ((y * z)^-1)
# # 2. (x^-1) * (y^-1) = (x * y)^-1
# # The following logic is an implementation of these rules.
# if not cls.__idx_l:
# for i in range(len(rt.chd)):
# if rt.chd[i].v == Type.OpT.POW and rt.chd[i].chd[1].v == -1:
# cls.__idx_l.append(i)
#
# if sgn:
# if 1 < len(cls.__idx_l) < len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), rt.chd[cls.__idx_l[0]].chd[1]]
#
# for idx in reversed(cls.__idx_l):
# tmp.chd[0].add_chd(rt.chd[idx].chd[0])
# rt.del_chd(idx)
#
# rt.add_chd(tmp)
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# elif len(cls.__idx_l) == len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), rt.chd[cls.__idx_l[0]].chd[1]]
#
# for i in range(len(rt.chd)):
# rt.swap_chd(rt.chd[i].chd[0], i)
#
# tmp.chd[0].chd = rt.chd
# rt, tmp = tmp, Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# else:
# if 1 < len(cls.__idx_l) < len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), rt.chd[cls.__idx_l[0]].chd[1]]
#
# for idx in reversed(cls.__idx_l):
# tmp.chd[0].add_chd(rt.chd[idx].chd[0])
# rt.del_chd(idx)
#
# rt.add_chd(tmp)
#
# return rt, warn
# elif len(cls.__idx_l) == len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), rt.chd[cls.__idx_l[0]].chd[1]]
#
# for i in range(len(rt.chd)):
# rt.swap_chd(rt.chd[i].chd[0], i)
#
# tmp.chd[0].chd = rt.chd
#
# return tmp, warn
# else:
# return rt, warn
#
# # Constant folding.
# # Multiplication (b/w two operand left and right) has following rules.
# # 1. If any of left or right is nan, the result is nan.
# # 2. If left is +-inf and right is +-inf, the result is +inf.
# # 3. If left is +-inf and right is -+inf, the result is -inf.
# # 4. If left is +-inf and right is 0, the result is nan.
# # 5. If left is +-inf and right is finite positive, the result is +-inf, resp.
# # 6. If left is +-inf and right is finite negative, the result is -+inf, resp.
# # 7. If both left and right is finite, the result is left * right.
# # The following logic is an implementation of these rules.
# for idx in reversed(cls.__idx_l[1:]):
# rt.chd[cls.__idx_l[0]].v *= rt.chd[idx].v
# rt.del_chd(idx)
#
# rt.chd[cls.__idx_l[0]].v *= -1 if sgn else 1
# sgn = False
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. x * 1 = x
# # 2. x * -1 = -x
# # 3. x * nan = nan
# # The following logic is an implementation of these rules.
# if len(rt.chd) > 1:
# if rt.chd[cls.__idx_l[0]].v == 1:
# rt.del_chd(cls.__idx_l[0])
# elif rt.chd[cls.__idx_l[0]].v == -1:
# sgn = True
# rt.del_chd(cls.__idx_l[0])
# elif math.isnan(rt.chd[cls.__idx_l[0]].v):
# return rt.chd[cls.__idx_l[0]], warn
#
# # Packing.
# # For packing, it uses following rules.
# # 1. x * (y^-1) * (z^-1) = x * ((y * z)^-1)
# # 2. (x^-1) * n = (x * n^-1)^-1
# # 2. (x^-1) * (y^-1) = (x * y)^-1
# # 3. x * Nil = x
# # The following logic is an implementation of these rules.
# cls.__idx_l.clear()
#
# if len(rt.chd) == 1:
# if sgn:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[0])
#
# return tmp, warn
# else:
# return rt.chd[0], warn
# else:
# for i in range(len(rt.chd)):
# if (rt.chd[i].v == Type.OpT.POW and rt.chd[i].chd[1].v == -1) or \
# (rt.chd[i].tok_t == Type.TokT.NUM and rt.chd[i].v != 0):
# cls.__idx_l.append(i)
#
# if sgn:
# if 1 < len(cls.__idx_l) < len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), Token.Num(-1)]
#
# for idx in reversed(cls.__idx_l):
# if rt.chd[idx].tok_t == Type.TokT.NUM:
# rt.chd[idx].v **= -1
# tmp.chd[0].add_chd(rt.chd[idx])
# else:
# tmp.chd[0].add_chd(rt.chd[idx].chd[0])
#
# rt.del_chd(idx)
#
# rt.add_chd(tmp)
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# elif len(cls.__idx_l) == len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), Token.Num(-1)]
#
# for i in range(len(rt.chd)):
# if rt.chd[i].tok_t == Type.TokT.NUM:
# rt.chd[i].v **= -1
# else:
# rt.swap_chd(rt.chd[i].chd[0], i)
#
# tmp.chd[0].chd = rt.chd
# rt, tmp = tmp, Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt)
#
# return tmp, warn
# else:
# if 1 < len(cls.__idx_l) < len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), Token.Num(-1)]
#
# for idx in reversed(cls.__idx_l):
# if rt.chd[idx].tok_t == Type.TokT.NUM:
# rt.chd[idx].v **= -1
# tmp.chd[0].add_chd(rt.chd[idx])
# else:
# tmp.chd[0].add_chd(rt.chd[idx].chd[0])
#
# rt.del_chd(idx)
#
# rt.add_chd(tmp)
#
# return rt, warn
# elif len(cls.__idx_l) == len(rt.chd):
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [Token.Op(Type.OpT.MUL), Token.Num(-1)]
#
# for i in range(len(rt.chd)):
# if rt.chd[i].tok_t == Type.TokT.NUM:
# rt.chd[i].v **= -1
# else:
# rt.swap_chd(rt.chd[i].chd[0], i)
#
# tmp.chd[0].chd = rt.chd
#
# return tmp, warn
# else:
# return rt, warn
# elif rt.v == Type.OpT.DIV:
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. x / (-y) = -x / y
# # 2. (-x) / (-y) = x / y
# # 3. n / (-y) = (-n) / y
# # 4. (x * y) / -z = (x * y * -1) / z
# # The following logic is an implementation of this rule.
# if rt.chd[1].v == Type.OpT.MINUS:
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0], 0)
# elif rt.chd[0].v == Type.OpT.MUL:
# rt.chd[0].add_chd(Token.Num(-1))
# elif rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v *= -1
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[0])
# rt.swap_chd(tmp, 0)
#
# rt.swap_chd(rt.chd[1].chd[0], 1)
#
# # Hoisting.
# # For hoisting, it uses following rules with simple optimization (x^-1)^-1 = x, (x^y)^-1=x^(-y).
# # 1. (a * ... * z) / (A * ... * Z) = a * ... * z * (A^-1) * ... * (Z^-1)
# # 2. (a * ... * z) / A = a * ... * z * (A^-1)
# # 3. a / (A * ... * Z) = a * (A^-1) * ... * (Z^-1)
# # 4. a / A = a * (A^-1)
# # The following logic is an implementation of these rules.
# rt.v = Type.OpT.MUL
#
# if rt.chd[0].v == Type.OpT.MUL:
# if rt.chd[1].v == Type.OpT.MUL:
# for i in range(len(rt.chd[1].chd)):
# if rt.chd[1].chd[i].v == Type.OpT.POW:
# if rt.chd[1].chd[i].chd[1].v == -1:
# rt.chd[1].swap_chd(rt.chd[1].chd[i].chd[0], i)
# else:
# rt.chd[1].chd[i].chd[1].v *= -1
# elif rt.chd[1].chd[i].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].chd[i].v) or is_smallint(rt.chd[1].chd[i].v):
# rt.chd[1].chd[i].v = 0
# elif rt.chd[1].chd[i].v == 0:
# rt.chd[1].chd[i].v = math.nan
# else:
# rt.chd[1].chd[i].v **= -1
# else:
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [rt.chd[1].chd[i], Token.Num(-1)]
# rt.chd[1].swap_chd(tmp, i)
#
# rt.chd = rt.chd[0].chd + rt.chd[1].chd
# else:
# if rt.chd[1].v == Type.OpT.POW:
# if rt.chd[1].chd[1].v == -1:
# rt.swap_chd(rt.chd[1].chd[0], 1)
# else:
# rt.chd[1].chd[1].v = -rt.chd[1].chd[1].v
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].v) or is_smallint(rt.chd[1].v):
# rt.chd[1].v = 0
# elif rt.chd[1].v == 0:
# rt.chd[1].v = math.nan
# else:
# rt.chd[1].v **= -1
# else:
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [rt.chd[1], Token.Num(-1)]
# rt.chd = rt.chd[0].chd
# rt.add_chd(tmp)
# elif rt.chd[1].v == Type.OpT.MUL:
# for i in range(len(rt.chd[1].chd)):
# if rt.chd[1].chd[i].v == Type.OpT.POW:
# if rt.chd[1].chd[i].chd[1].v == -1:
# rt.chd[1].swap_chd(rt.chd[1].chd[i].chd[0], i)
# else:
# rt.chd[1].chd[i].chd[1].v *= -1
# elif rt.chd[1].chd[i].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].chd[i].v) or is_smallint(rt.chd[1].chd[i].v):
# rt.chd[1].chd[i].v = 0
# elif rt.chd[1].chd[i].v == 0:
# rt.chd[1].chd[i].v = math.nan
# else:
# rt.chd[1].chd[i].v **= -1
# else:
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [rt.chd[1].chd[i], Token.Num(-1)]
# rt.chd[1].swap_chd(tmp, i)
#
# rt.chd = rt.chd[:-1] + rt.chd[1].chd
# else:
# if rt.chd[1].v == Type.OpT.POW:
# if rt.chd[1].chd[1].v == -1:
# rt.swap_chd(rt.chd[1].chd[0], 1)
# else:
# rt.chd[1].chd[1].v *= -1
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].v) or is_smallint(rt.chd[1].v):
# rt.chd[1].v = 0
# elif rt.chd[1].v == 0:
# rt.chd[1].v = math.nan
# else:
# rt.chd[1].v **= -1
# else:
# tmp = Token.Op(Type.OpT.POW)
# tmp.chd = [rt.chd[1], Token.Num(-1)]
# rt.swap_chd(tmp, 1)
#
# # If there is parent which can handle hoisted children, let it handle.
# # Otherwise, call simplify method for addition to handle.
# if prn and prn.v in [Type.OpT.DIV, Type.OpT.MUL]:
# return rt, warn
# else:
# return cls.pck(rt, prn)
# elif rt.v == Type.OpT.REM:
# # Sign propagation.
# # For sign propagation, it uses following rules.
# # 1. (-x) % (-y) = -(x % y)
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == rt.chd[1].v == Type.OpT.MINUS:
# sgn = True
# tmp = rt.chd[0]
# rt.chd = [rt.chd[0].chd[0], rt.chd[1].chd[0]]
#
# # Check for warnings.
# # Remainder operator generates warning for followings cases.
# # 1. Nominator or denominator exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. Nominator or denominator is nan. (NAN_DETECT)
# # 3. Nominator or denominator is inf. (INF_DETECT)
# # 4. Denominator is 0. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=1, handle='remainder operation'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 19, arg_pos=1, handle='remainder operation'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=1, handle='remainder operation'))
# elif math.isinf(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=1, handle='remainder operation'))
#
# if rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=2, handle='remainder operation'))
# rt.chd[1].v = math.inf
# elif is_smallint(rt.chd[1].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 19, arg_pos=2, handle='remainder operation'))
# rt.chd[1].v = -math.inf
# elif math.isnan(rt.chd[1].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=2, handle='remainder operation'))
# elif math.isinf(rt.chd[1].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=2, handle='remainder operation'))
# elif rt.chd[1].v == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 10))
#
# # Constant folding.
# # Remainder operator has following rules.
# # 1. If any of nominator or denominator is nan, the result is nan.
# # 2. If nominator is +-inf or denominator is 0, the result is nan.
# # 3. If nominator is finite nonnegative and denominator is +inf, the result is nominator itself.
# # 4. If nominator is finite nonnegative and denominator is -inf, the result is -inf.
# # 5. If nominator is finite nonpositive and denominator is -inf, the result is nominator itself.
# # 6. If nominator is finite nonpositive and denominator is +inf, the result is +inf.
# # 7. If nominator is finite and denominator is finite nonzero, the result is nominator % denominator.
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == rt.chd[1].tok_t == Type.TokT.NUM:
# if rt.chd[1].v == 0:
# rt.chd[0].v = math.nan
# else:
# rt.chd[0].v = rt.chd[0].v % rt.chd[1].v
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. nan % x = nan
# # 2. x % nan = nan
# # 3. x % 0 = nan
# # 4. +-inf % x = nan
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if math.isnan(rt.chd[0].v):
# return rt.chd[0], warn
# elif math.isinf(rt.chd[0].v):
# rt.chd[0].v = math.nan
#
# return rt.chd[0], warn
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# if math.isnan(rt.chd[1].v):
# return rt.chd[1], warn
# elif rt.chd[1].v == 0:
# rt.chd[1].v = math.nan
#
# return rt.chd[1], warn
#
# if sgn:
# tmp.swap_chd(rt, 0)
# else:
# tmp = rt
#
# return tmp, warn
# elif rt.v == Type.OpT.FACT:
# # Check for warnings.
# # Factorial operator generates warning for followings cases.
# # 1. Operand exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. Operand is nan. (NAN_DETECT)
# # 3. Nominator is inf. (INF_DETECT)
# # 4. Operand is negative integer. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=1, handle='factorial operation'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 9))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=1, handle='factorial operation'))
# elif math.isinf(rt.chd[0].v):
# warn.append(
# Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=1, handle='factorial operation'))
# elif rt.chd[0].v % 1 == 0 and rt.chd[0].v < 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 9))
#
# # Constant folding.
# # Factorial operator has following rules.
# # 1. If operand is nan, the result is nan.
# # 2. If operand is +inf, the result is +inf.
# # 3. If operand is -inf or finite negative integer, the result is nan.
# # 4. If operand is finite nonnegative integer, the result is ``math.factorial(x)``.
# # 5. If operand is finite noninteger, the result is ``math.gamma(x + 1)``.
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if rt.chd[0].v < 0 and math.isinf(rt.chd[0].v):
# rt.chd[0].v = math.nan
# elif rt.chd[0].v % 1 == 0:
# if rt.chd[0].v >= 0:
# rt.chd[0].v = math.factorial(rt.chd[0].v)
# else:
# rt.chd[0].v = math.nan
# else:
# try:
# rt.chd[0].v = math.gamma(rt.chd[0].v + 1)
# except OverflowError:
# rt.chd[0].v = math.inf
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.OpT.POW:
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. (-x)^y = -(x^y) if y is odd.
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS and rt.chd[1].tok_t == Type.TokT.NUM and rt.chd[1].v % 2 == 1:
# sgn = True
# tmp = rt.chd[0]
# rt.swap_chd(tmp.chd[0], 0)
#
# # Check for warnings.
# # Power operator generates warning for followings cases.
# # 1. Base or exponent exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. Base or exponent is nan. (NAN_DETECT)
# # 3. Base or exponent is inf. (INF_DETECT)
# # 4. Base is 0 and exponent is finite negative integer. (POLE_DETECT)
# # 5. Base is finite negative and exponent is finite noninteger. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=1, handle='power operation'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 19, arg_pos=1, handle='power operation'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=1, handle='power operation'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=1, handle='power operation'))
#
# if rt.chd[1].tok_t == Type.TokT.NUM:
# if rt.chd[0].v == 0 and rt.chd[1].v < 0:
# if rt.chd[1].v % 1 == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 11))
# else:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 11))
# elif rt.chd[0].v < 0 and rt.chd[1].v % 1 != 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 12))
#
# if rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 18, arg_pos=2, handle='power operation'))
# rt.chd[1].v = math.inf
# elif is_smallint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 19, arg_pos=2, handle='power operation'))
# rt.chd[1].v = -math.inf
# elif math.isnan(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 7, arg_pos=2, handle='power operation'))
# elif math.isinf(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 8, arg_pos=2, handle='power operation'))
#
# # Hoisting.
# # For hoisting, it uses following rules with simple optimization (x^-1)^-1 = x, (x^y)^-1=x^(-y).
# # 1. (x^y)^z = x^(y * z)
# # The following logic is an implementation of these rules.
# # Note that hoisting occurs only for NUN token.
# if rt.chd[0].v == Type.OpT.POW and rt.chd[1].tok_t == rt.chd[0].chd[1].tok_t == Type.TokT.NUM:
# rt.chd[1].v *= rt.chd[0].chd[1].v
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# # Constant folding.
# # Power operator has following rules.
# # 1. If exponent is 0, the result is 1.
# # 2. If exponent is nan, the result is nan.
# # 3. If base is 0 and exponent is +inf, the result is 0.
# # 4. If base is 0 and exponent is -inf, the result is +inf.
# # 5. If base is nan and exponent is nonzero, the result is nan.
# # 6. If base is not nan and exponent is +inf, the result is +inf.
# # 7. If base is not nan and exponent is -inf, the result is 0.
# # 8. If base is +inf and exponent is finite positive, the result is +inf.
# # 9. If base is -inf and exponent is finite positive which is not odd integer, the result is +inf.
# # 10. If base is -inf and exponent is finite positive odd integer, the result is -inf.
# # 11. If base is +-inf and exponent is finite negative, the result is 0.
# # 12. If base is 0 and exponent is finite negative, the result is nan.
# # 13. If base is 0 and exponent is finite positive, the result is 0.
# # 14. If base is finite negative and exponent is finite noninteger, the result is nan.
# # 15. If base is finite negative and exponent is finite integer, the result is base ** exponent.
# # 16. If base is finite positive and exponent is finite, the result is base ** exponent.
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == rt.chd[1].tok_t == Type.TokT.NUM:
# if rt.chd[0].v == 0 and rt.chd[1].v < 0:
# rt.chd[0].v = math.nan
# elif rt.chd[0].v < 0 and rt.chd[1].v % 1 != 0:
# rt.chd[0].v = math.nan
# else:
# try:
# rt.chd[0].v = rt.chd[0].v ** rt.chd[1].v
# except OverflowError:
# rt.chd[0].v = math.inf
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. x^0 = 1
# # 2. x^1 = x
# # 3. (-x)^y = x^y if y is even.
# # 4. x^nan = nan
# # The following logic is an implementation of these rules.
# if rt.chd[1].tok_t == Type.TokT.NUM:
# if rt.chd[1].v == 0:
# rt.chd[1].v = 1
#
# return rt.chd[1], warn
# elif rt.chd[1].v == 1:
# if sgn:
# tmp.swap_chd(rt.chd[0], 0)
# else:
# tmp = rt.chd[0]
#
# return tmp, warn
# elif rt.chd[0].v == Type.OpT.MINUS and rt.chd[1].v % 2 == 0:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
# elif math.isnan(rt.chd[1].v):
# return rt.chd[1], warn
#
# if sgn:
# tmp.swap_chd(rt, 0)
# else:
# tmp = rt
#
# return tmp, warn
# elif rt.v == Type.OpT.PLUS:
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. +x = x
# # The following logic is an implementation of these rules.
# return rt.chd[0], warn
# else:
# # Constant folding.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = -rt.chd[0].v
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. -(-x) = x
# # 2. -(x * n) = x * -n
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# return rt.chd[0].chd[0], warn
# elif rt.chd[0].v == Type.OpT.MUL:
# for tok in rt.chd[0].chd:
# if tok.tok_t == Type.TokT.NUM:
# tok.v *= -1
#
# return rt.chd[0], warn
#
# # Unpacking.
# # For distribution, it uses following rules with simple optimization -(-x) = x.
# # 1. -(a + ... + z) = (-a) + ... + (-z)
# # 2. -(a * ... * z) = a * ... * z * -1
# # The following logic is an implementation of these rules.
# if prn:
# if prn.v in [Type.OpT.ADD, Type.OpT.SUB] and rt.chd[0].v == Type.OpT.ADD:
# for i in range(len(rt.chd[0].chd)):
# if rt.chd[0].chd[i].v == Type.OpT.MINUS:
# rt.chd[0].swap_chd(
# rt.chd[0].chd[i].chd[0], i)
# elif rt.chd[0].chd[i].tok_t == Type.TokT.NUM:
# rt.chd[0].chd[i].v = -rt.chd[0].chd[i].v
# else:
# tmp = Token.Op(Type.OpT.MINUS)
# tmp.add_chd(rt.chd[0].chd[i])
# rt.chd[0].swap_chd(tmp, i)
#
# return rt.chd[0], warn
# elif prn.v in [Type.OpT.MUL, Type.OpT.DIV] and rt.chd[0].v == Type.OpT.MUL:
# rt.chd[0].add_chd(Token.Num(-1))
#
# return rt.chd[0], warn
# return rt, warn
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,549 | eik4862/TinyCalculator | refs/heads/master | /Core/TypeChecker.py | from __future__ import annotations
from typing import final, List, Tuple, Optional, Set, Dict
from Core import AST, Token, TypeSystem, Type
from Operator import *
from Util import Printer
from copy import copy
@final
class FVar:
    """Fold variable: a comparable identifier drawn from a class-wide counter.

    Each new instance receives the next integer id; ids order the variables.
    """

    __cnt: int = 0  # next id to hand out; shared by every instance

    def __init__(self) -> None:
        # Take the current counter value as this variable's id, then bump it.
        self.__v: int = self.__cnt
        self.__advance()

    @classmethod
    def __advance(cls) -> None:
        """Move the shared counter past the id just consumed."""
        cls.__cnt += 1

    @property
    def v(self) -> int:
        """Numeric id of this fold variable."""
        return self.__v

    @v.setter
    def v(self, v: int) -> None:
        self.__v = v

    def __eq__(self, other: FVar) -> bool:
        """Two fold variables coincide iff their ids are equal."""
        return other.v == self.__v

    def __lt__(self, other: FVar) -> bool:
        return self.__v < other.v

    def __le__(self, other: FVar) -> bool:
        return self.__v <= other.v
@final
class List2(TypeSystem.T):
    """Homogeneous list type: an element type paired with a fold variable.

    The fold variable stands for the (as yet unresolved) length class of the
    list inside fold constraints.
    """

    def __init__(self, chd_t: TypeSystem.T, fold: FVar) -> None:
        # A list type is never a base type.
        super().__init__(False)
        self.__chd_t: TypeSystem.T = chd_t
        self.__fold: FVar = fold

    def __str__(self) -> str:
        return 'List({0}, {1})'.format(self.__chd_t, self.__fold.v)

    @property
    def chd_t(self) -> TypeSystem.T:
        """Element type of the list."""
        return self.__chd_t

    @property
    def fold(self) -> FVar:
        """Fold variable attached to this list type."""
        return self.__fold

    @fold.setter
    def fold(self, fold: FVar) -> None:
        self.__fold = fold
@final
class TVar:
    """Type variable identified by a monotonically increasing class counter.

    Mirrors :class:`FVar`; comparisons and equality are delegated to the
    integer id.
    """

    __cnt: int = 0  # shared source of fresh ids

    def __init__(self) -> None:
        self.__v: int = self.__cnt  # claim the current id ...
        self.__bump()               # ... and advance the counter

    @classmethod
    def __bump(cls) -> None:
        """Advance the shared id counter by one."""
        cls.__cnt += 1

    def __eq__(self, other: TVar) -> bool:
        return self.__v == other.v

    def __lt__(self, other: TVar) -> bool:
        return self.__v < other.v

    def __le__(self, other: TVar) -> bool:
        return not (other.v < self.__v)

    @property
    def v(self) -> int:
        """Numeric id of this type variable."""
        return self.__v

    @v.setter
    def v(self, v: int) -> None:
        self.__v = v
@final
class FConst:
    """Fold (length) constraint over a sorted list of fold variables.

    ``var`` and ``offset`` are parallel lists, kept sorted by variable id.
    ``offset[0] is None`` marks an *equality* constraint (each listed variable
    equals the first one plus its offset); otherwise the constraint binds each
    variable to its own concrete offset (a "non-equality" constraint).
    """

    # Shared scratch buffers; cleared at the start of every multiplication so
    # they never carry state between calls.
    __idx_map: List[Tuple[int, int]] = []
    __idx_src: List[Tuple[int, int]] = []
    __merge_it: List[int] = []

    def __init__(self, var: List[FVar], offset: List[Optional[int]]) -> None:
        self.__var: List[FVar] = var
        self.__offset: List[Optional[int]] = offset
        # offset[0] is None flags an equality constraint.
        self.__eq: bool = offset[0] is None

    # Debugging helper; remove later.
    def __str__(self) -> str:
        buf: str = '{'

        for i in range(len(self.__var)):
            buf += f'{self.__var[i].v}:{self.__offset[i]}, '

        return buf[:-2] + '}'

    def __add__(self, other: FConst) -> FConst:
        """Merge two *non-equality* constraints with disjoint variable sets.

        Overlapping constraints must be combined through ``*`` first; this is
        a plain sorted merge of the (var, offset) pairs.
        """
        assert not self.__eq and not other.eq

        merged_var: List[FVar] = []
        merged_offset: List[int] = []
        i: int = 0
        j: int = 0

        while i < len(self.__var) and j < len(other.var):
            while i < len(self.__var) and self.__var[i] < other.var[j]:
                merged_var.append(self.__var[i])
                merged_offset.append(self.__offset[i])
                i += 1

            if i == len(self.__var):
                continue

            while j < len(other.var) and self.__var[i] > other.var[j]:
                merged_var.append(other.var[j])
                merged_offset.append(other.offset[j])
                j += 1

        # Append whichever side still has entries left.
        if i == len(self.__var):
            merged_var += other.var[j:]
            merged_offset += other.offset[j:]
        else:
            merged_var += self.__var[i:]
            merged_offset += self.__offset[i:]

        return FConst(merged_var, merged_offset)

    def __mul__(self, other: FConst) -> Tuple[bool, Optional[FConst]]:
        """Unify two constraints.

        :return: ``(ok, merged)`` — ``ok`` is False when the constraints are
            contradictory; ``merged`` is None when they share no variable
            (nothing to unify) and the unified constraint otherwise.
        """
        if self.__eq:
            if other.eq:
                return self.__mul_eq_eq_hlpr(other)
            else:
                return self.__mul_eq_neq_hlpr(other)
        else:
            if other.eq:
                # Normalize so the equality constraint is on the left.
                return other * self
            else:
                return self.__mul_neq_neq_hlpr(other)

    def __mul_eq_eq_hlpr(self, other: FConst) -> Tuple[bool, Optional[FConst]]:
        """Unify two equality constraints (see ``__mul__`` for the protocol)."""
        i: int = 0
        j: int = 0
        cnt: int = 0
        new_var: List[FVar] = []
        new_offset: List[Optional[int]] = []
        self.__idx_map.clear()
        self.__idx_src.clear()
        self.__merge_it.clear()

        # Sorted scan: record where the variable lists overlap (__idx_map) and
        # where each slot of the merged list comes from
        # (__idx_src: -1 = self only, 0 = common/resolved slot, 1 = other only).
        while i < len(self.__var) and j < len(other.var):
            while i < len(self.__var) and self.__var[i] < other.var[j]:
                new_var.append(self.__var[i])
                self.__idx_src.append((-1, i))
                i += 1

            if i == len(self.__var):
                continue

            if self.__var[i] == other.var[j]:
                self.__idx_map.append((i, j))
                self.__idx_src.append((0, cnt))
                cnt += 1
                i += 1
            else:
                self.__idx_src.append((1, j))

            new_var.append(other.var[j])
            j += 1

        if not self.__idx_map:
            # No common variable: nothing to unify.
            return True, None

        if i == len(self.__var):
            self.__idx_src += [(1, k + j) for k in range(len(other.var[j:]))]
            new_var += other.var[j:]
        else:
            self.__idx_src += [(-1, k + i) for k in range(len(self.__var[i:]))]
            new_var += self.__var[i:]

        assert len(self.__idx_src) == len(new_var)

        # Equal ref
        if self.__idx_map[0][0] == 0:
            if self.__idx_map[0][1] == 0:
                # Both reference variables coincide: all shared offsets must
                # agree exactly.
                for idx in self.__idx_map[1:]:
                    if self.__offset[idx[0]] != other.offset[idx[1]]:
                        return False, None

                for idx in self.__idx_src:
                    if idx[0] == -1:
                        new_offset.append(self.__offset[idx[1]])
                    elif idx[0] == 0:
                        new_offset.append(other.offset[self.__idx_map[idx[1]][1]])
                    else:
                        new_offset.append(other.offset[idx[1]])

                return True, FConst(new_var, new_offset)
            # Self ref and other offset
            else:
                # Self's reference variable is shared; shift self's offsets by
                # the offset the other constraint assigns to it.
                addi: int = other.offset[self.__idx_map[0][1]]

                for idx in self.__idx_map[1:]:
                    if other.offset[idx[1]] - self.__offset[idx[0]] != addi:
                        return False, None

                for idx in self.__idx_src:
                    if idx[0] == -1:
                        new_offset.append(self.__offset[idx[1]] + addi)
                    elif idx[0] == 0:
                        new_offset.append(other.offset[self.__idx_map[idx[1]][1]])
                    else:
                        new_offset.append(other.offset[idx[1]])

                return True, FConst(new_var, new_offset)
        else:
            # other ref and self offset
            if self.__idx_map[0][1] == 0:
                addi: int = self.__offset[self.__idx_map[0][0]]

                for idx in self.__idx_map[1:]:
                    if self.__offset[idx[0]] - other.offset[idx[1]] != addi:
                        return False, None

                for idx in self.__idx_src:
                    if idx[0] == -1:
                        new_offset.append(self.__offset[idx[1]])
                    elif idx[0] == 0:
                        new_offset.append(self.__offset[self.__idx_map[idx[1]][0]])
                    else:
                        new_offset.append(other.offset[idx[1]] + addi)

                return True, FConst(new_var, new_offset)
            else:
                # Neither reference variable is shared: the smaller-id variable
                # becomes the reference of the merged constraint.
                if self.__var[0] < other.var[0]:
                    addi: int = self.__offset[self.__idx_map[0][0]] - other.offset[self.__idx_map[0][1]]

                    for idx in self.__idx_map[1:]:
                        if self.__offset[idx[0]] - other.offset[idx[1]] != addi:
                            return False, None

                    for idx in self.__idx_src:
                        if idx[0] == -1:
                            new_offset.append(self.__offset[idx[1]])
                        elif idx[0] == 0:
                            new_offset.append(self.__offset[self.__idx_map[idx[1]][0]])
                        else:
                            if idx[1] == 0:
                                new_offset.append(addi)
                            else:
                                new_offset.append(other.offset[idx[1]] + addi)

                    return True, FConst(new_var, new_offset)
                else:
                    addi: int = other.offset[self.__idx_map[0][1]] - self.__offset[self.__idx_map[0][0]]

                    for idx in self.__idx_map[1:]:
                        if other.offset[idx[1]] - self.__offset[idx[0]] != addi:
                            return False, None

                    for idx in self.__idx_src:
                        if idx[0] == -1:
                            if idx[1] == 0:
                                new_offset.append(addi)
                            else:
                                new_offset.append(self.__offset[idx[1]] + addi)
                        elif idx[0] == 0:
                            new_offset.append(other.offset[self.__idx_map[idx[1]][1]])
                        else:
                            new_offset.append(other.offset[idx[1]])

                    return True, FConst(new_var, new_offset)

    def __mul_neq_neq_hlpr(self, other: FConst) -> Tuple[bool, Optional[FConst]]:
        """Unify two non-equality constraints: shared variables must carry the
        same offset; disjoint constraints are reported as nothing-to-unify."""
        i: int = 0
        j: int = 0
        new_var: List[FVar] = []
        new_offset: List[Optional[int]] = []
        disjoint: bool = True

        while i < len(self.__var) and j < len(other.var):
            while i < len(self.__var) and self.__var[i] < other.var[j]:
                new_var.append(self.__var[i])
                new_offset.append(self.__offset[i])
                i += 1

            if i == len(self.__var):
                continue

            if self.__var[i] == other.var[j]:
                if self.__offset[i] != other.offset[j]:
                    return False, None

                i += 1
                disjoint = False

            new_var.append(other.var[j])
            new_offset.append(other.offset[j])
            j += 1

        # NOTE(review): variables remaining in either operand after the scan
        # are not appended here (cf. the tail handling in __add__) — confirm
        # this is intended.
        return (True, None) if disjoint else (True, FConst(new_var, new_offset))

    def __mul_eq_neq_hlpr(self, other: FConst) -> Tuple[bool, Optional[FConst]]:
        """Unify an equality constraint (self) with a non-equality one (other)."""
        i: int = 0
        j: int = 0
        cnt: int = 0
        new_var: List[FVar] = []
        new_offset: List[Optional[int]] = []
        self.__idx_map.clear()
        self.__idx_src.clear()
        self.__merge_it.clear()

        # Same sorted scan bookkeeping as __mul_eq_eq_hlpr.
        while i < len(self.__var) and j < len(other.var):
            while i < len(self.__var) and self.__var[i] < other.var[j]:
                new_var.append(self.__var[i])
                self.__idx_src.append((-1, i))
                i += 1

            if i == len(self.__var):
                continue

            if self.__var[i] == other.var[j]:
                self.__idx_map.append((i, j))
                self.__idx_src.append((0, cnt))
                cnt += 1
                i += 1
            else:
                self.__idx_src.append((1, j))

            new_var.append(other.var[j])
            j += 1

        if not self.__idx_map:
            return True, None

        if i == len(self.__var):
            self.__idx_src += [(1, k + j) for k in range(len(other.var[j:]))]
            new_var += other.var[j:]
        else:
            self.__idx_src += [(-1, k + i) for k in range(len(self.__var[i:]))]
            new_var += self.__var[i:]

        assert len(self.__idx_src) == len(new_var)

        # Ref eq something
        if self.__idx_map[0][0] == 0:
            addi: int = other.offset[self.__idx_map[0][1]]

            for idx in self.__idx_map[1:]:
                if other.offset[idx[1]] - self.__offset[idx[0]] != addi:
                    return False, None

            for idx in self.__idx_src:
                if idx[0] == -1:
                    new_offset.append(self.__offset[idx[1]] + addi)
                elif idx[0] == 0:
                    new_offset.append(other.offset[self.__idx_map[idx[1]][1]])
                else:
                    new_offset.append(other.offset[idx[1]])

            return True, FConst(new_var, new_offset)
        else:
            addi: int = other.offset[self.__idx_map[0][1]] - self.__offset[self.__idx_map[0][0]]

            for idx in self.__idx_map[1:]:
                if other.offset[idx[1]] - self.__offset[idx[0]] != addi:
                    return False, None

            for idx in self.__idx_src:
                if idx[0] == -1:
                    if idx[1] == 0:
                        new_offset.append(addi)
                    else:
                        new_offset.append(self.__offset[idx[1]] + addi)
                elif idx[0] == 0:
                    new_offset.append(other.offset[self.__idx_map[idx[1]][1]])
                else:
                    new_offset.append(other.offset[idx[1]])

            return True, FConst(new_var, new_offset)

    @property
    def var(self) -> List[FVar]:
        """Sorted fold variables of this constraint."""
        return self.__var

    @property
    def offset(self) -> List[Optional[int]]:
        """Offsets parallel to :attr:`var` (``offset[0] is None`` => equality)."""
        return self.__offset

    @property
    def eq(self) -> bool:
        """True iff this is an equality constraint."""
        return self.__eq

    @var.setter
    def var(self, var: List[FVar]) -> None:
        self.__var = var

    def drop(self) -> FConst:
        """Collapse an equality constraint by aliasing duplicate variables.

        Variables whose offset is 0 take the reference variable's id and are
        dropped; remaining duplicates (equal ids) are deduplicated.  Returns a
        new ``FConst`` over the unique variables; non-equality constraints are
        returned unchanged.
        """
        if not self.__eq:
            return self

        drop_msk: List[bool] = [False for _ in range(len(self.__var))]
        uniq_idx: List[int] = [0]
        i: int = 1

        while i < len(self.__var):
            if self.__offset[i] == 0:
                drop_msk[i] = True
                self.__var[i].v = self.__var[0].v

            i += 1

        i = 1

        while i < len(self.__var):
            if drop_msk[i]:
                i += 1

                continue

            uniq_idx.append(i)
            j: int = i + 1

            while j < len(self.__var):
                if self.__var[i] == self.__var[j]:
                    drop_msk[j] = True
                    # Fix: alias the duplicate's *id* (an int), not the FVar
                    # object itself; assigning the object corrupted the id and
                    # broke subsequent comparisons.
                    self.__var[j].v = self.__var[i].v

                j += 1

            i += 1

        return FConst([self.__var[idx] for idx in uniq_idx], [self.__offset[idx] for idx in uniq_idx])
@final
class TConst:
    """Type constraint table.

    ``var`` is a sorted list of type variables; ``cand`` is a list of candidate
    rows, each assigning one concrete type per variable; ``f_const`` holds, per
    row, the fold (length) constraints attached to list types in that row.
    Multiplying two tables unifies them row-by-row.
    """

    # Shared scratch buffers; cleared at the start of every multiplication so
    # they never carry state between calls.
    __idx_map: List[Tuple[int, int]] = []
    __idx_src: List[Tuple[int, int]] = []
    __merge_it: List[Tuple[int, int, List[TypeSystem.T], List[FConst]]] = []
    __merge_idx: List[int] = []

    def __init__(self, var: List[TVar], cand: List[List[TypeSystem.T]], f_const: List[List[FConst]]) -> None:
        self.__var: List[TVar] = var
        self.__cand: List[List[TypeSystem.T]] = cand
        self.__f_const: List[List[FConst]] = f_const
        # Per row: the sorted union of fold variables referenced by its
        # fold constraints.
        self.__f_var_l: List[List[FVar]] = []
        for it in f_const:
            if not it:
                self.__f_var_l.append([])
                continue
            var_l: List[FVar] = copy(it[0].var)
            for const in it[1:]:
                var_l = self.__merge(var_l, const.var)
            self.__f_var_l.append(var_l)

    def __getitem__(self, item: TVar) -> Optional[Set[TypeSystem.T]]:
        """Return the set of candidate types for ``item``, or None if the
        variable is not in this table (vars are sorted, so search stops early)."""
        pos: int = 0
        while pos < len(self.__var):
            if item == self.__var[pos]:
                return {it[pos] for it in self.__cand}
            elif item < self.__var[pos]:
                return None
            pos += 1
        return None

    def __mul__(self, other: TConst) -> Tuple[bool, Optional[TConst]]:
        """Unify two tables.

        Returns ``(ok, merged)``: ``ok`` is False when no candidate row pair
        survives unification; ``merged`` is None when the tables share no type
        variable, and the unified table otherwise.
        """
        # Debug tracing.
        print('unifying two constraint table')
        print('LHS')
        print(' @var : ' + ', '.join([str(var.v) for var in self.__var]))
        print(' @const:')
        for i in range(len(self.__cand)):
            print(f' [{i}] ' + ', '.join([str(t) for t in self.__cand[i]]) + ' ' +
                  ' '.join([str(it) for it in self.__f_const[i]]) + ' / ' +
                  ', '.join([str(var.v) for var in self.__f_var_l[i]]))
        print('')
        print('RHS')
        print(' @var : ' + ', '.join([str(var.v) for var in other.var]))
        print(' @const:')
        for i in range(len(other.cand)):
            print(f' [{i}] ' + ', '.join([str(t) for t in other.cand[i]]) + ' ' +
                  ' '.join([str(it) for it in other.f_const[i]]) + ' / ' +
                  ', '.join([str(var.v) for var in other.__f_var_l[i]]))
        print('')
        i: int = 0
        j: int = 0
        cnt: int = 0
        new_var: List[TVar] = []
        new_cand: List[List[TypeSystem.T]] = []
        new_f_const: List[List[FConst]] = []
        self.__idx_map.clear()
        self.__idx_src.clear()
        self.__merge_it.clear()
        # Sorted scan over both variable lists: __idx_map records overlapping
        # positions, __idx_src records the provenance of each merged slot
        # (-1 = self only, 0 = common/resolved slot, 1 = other only).
        while i < len(self.__var) and j < len(other.var):
            while i < len(self.__var) and self.__var[i] < other.var[j]:
                new_var.append(self.__var[i])
                self.__idx_src.append((-1, i))
                i += 1
            if i == len(self.__var):
                continue
            if self.__var[i] == other.var[j]:
                self.__idx_map.append((i, j))
                self.__idx_src.append((0, cnt))
                cnt += 1
                i += 1
            else:
                self.__idx_src.append((1, j))
            new_var.append(other.var[j])
            j += 1
        if not self.__idx_map:
            print('nothing to unify')
            return True, None
        if i == len(self.__var):
            self.__idx_src += [(1, k + j) for k in range(len(other.var[j:]))]
            new_var += other.var[j:]
        else:
            self.__idx_src += [(-1, k + i) for k in range(len(self.__var[i:]))]
            new_var += self.__var[i:]
        assert len(self.__idx_src) == len(new_var)
        # Try every row pair: resolve the types at each shared variable and
        # collect the fold constraints generated by the resolution.
        for i in range(len(self.__cand)):
            for j in range(len(other.cand)):
                merge_flag: bool = True
                resolved: List[TypeSystem.T] = []
                f_const: List[FConst] = []
                for idx in self.__idx_map:
                    sub_t, const = self.__t_resolve(self.__cand[i][idx[0]], other.cand[j][idx[1]])
                    merge_flag &= bool(sub_t)
                    resolved.append(sub_t)
                    if const:
                        f_const.append(const)
                    if not merge_flag:
                        break
                if merge_flag:
                    self.__merge_it.append((i, j, resolved, f_const))
        if not self.__merge_it:
            print('unification failed')
            return False, None
        # Materialize each surviving row pair; rows whose fold constraints are
        # contradictory are silently skipped.
        for it in self.__merge_it:
            new_it: List[TypeSystem.T] = []
            for idx in self.__idx_src:
                if idx[0] == -1:
                    new_it.append(self.__cand[it[0]][idx[1]])
                elif idx[0] == 0:
                    new_it.append(it[2][idx[1]])
                else:
                    new_it.append(other.cand[it[1]][idx[1]])
            new_const_it: List[FConst] = self.__f_unify(self.__f_const[it[0]], other.f_const[it[1]])
            if new_const_it is None:
                continue
            new_const_it = self.__f_unify(new_const_it, it[3])
            if new_const_it is None:
                continue
            new_it, new_const_it = self.__alpha_conv(new_it, new_const_it,
                                                     self.__merge(self.__f_var_l[it[0]], other.f_var_l[it[1]]))
            new_const_it = [const.drop() for const in new_const_it]
            new_cand.append(new_it)
            new_f_const.append(new_const_it)
        if not new_cand:
            print('unification failed')
            return False, None
        print('unification result')
        print(' @var : ' + ', '.join([str(var.v) for var in new_var]))
        print(' @const:')
        for i in range(len(new_cand)):
            print(f' [{i}] ' + ', '.join([str(t) for t in new_cand[i]]) + ' ' +
                  ' '.join([str(it) for it in new_f_const[i]]))
        print('')
        return True, TConst(new_var, new_cand, new_f_const)

    def __t_resolve(self, t1: TypeSystem.T, t2: TypeSystem.T) -> Tuple[Optional[TypeSystem.T], Optional[FConst]]:
        """Resolve two candidate types.

        Base types resolve iff identical.  Two list types with the same element
        type resolve to one list type, possibly emitting an equality fold
        constraint tying their fold variables together.
        """
        if t1.base:
            return (None, None) if not t2.base else (t1, None) if type(t1) == type(t2) else (None, None)
        if t2.base or type(t1.chd_t) != type(t2.chd_t):
            return None, None
        if t1.fold == t2.fold:
            return List2(t1.chd_t, t1.fold), None
        elif t1.fold < t2.fold:
            return List2(t1.chd_t, t1.fold), FConst([t1.fold, t2.fold], [None, 0])
        else:
            return List2(t1.chd_t, t1.fold), FConst([t2.fold, t1.fold], [None, 0])

    def __f_unify(self, f_const_l1: List[FConst], f_const_l2: List[FConst]) -> Optional[List[FConst]]:
        """Pairwise-unify two fold-constraint lists.

        Returns None on contradiction; otherwise the unified list, with all
        surviving non-equality constraints merged into a single constraint.
        """
        unified: List[FConst] = []
        merged: List[FConst] = []
        self.__merge_idx.clear()
        f_const_l2 = copy(f_const_l2)
        for i in range(len(f_const_l1)):
            del_idx: List[int] = []
            res: FConst = f_const_l1[i]
            j: int = 0
            while j < len(f_const_l2):
                succ, tmp = res * f_const_l2[j]
                if not succ:
                    return None
                if tmp:
                    del_idx.append(j)
                    res = tmp
                j += 1
            if del_idx:
                # Remove consumed constraints back-to-front so indices stay valid.
                j = len(del_idx) - 1
                while j >= 0:
                    del f_const_l2[del_idx[j]]
                    j -= 1
                f_const_l2.append(res)
            else:
                unified.append(copy(res))
        unified += f_const_l2
        for i in range(len(unified)):
            if unified[i].eq:
                merged.append(unified[i])
            else:
                self.__merge_idx.append(i)
        if len(self.__merge_idx) == 0:
            return merged
        res: FConst = unified[self.__merge_idx[0]]
        for i in self.__merge_idx[1:]:
            res += unified[i]
        return merged + [res]

    def __alpha_conv(self, cand: List[TypeSystem.T], f_const: List[FConst],
                     f_var: List[FVar]) -> Tuple[List[TypeSystem.T], List[FConst]]:
        """Rename every fold variable in ``cand``/``f_const`` to a fresh one
        (alpha conversion), mapping old ids to new variables."""
        new_f_var: List[FVar] = [FVar() for _ in range(len(f_var))]
        var_map: Dict[int, FVar] = {f_var[i].v: new_f_var[i] for i in range(len(f_var))}
        for t in cand:
            if not t.base:
                t.fold = var_map[t.fold.v]
        for const in f_const:
            const.var = [var_map[var.v] for var in const.var]
        return cand, f_const

    def __merge(self, l1: list, l2: list) -> list:
        """Merge two sorted lists, keeping one copy of shared elements."""
        i: int = 0
        j: int = 0
        res: list = []
        while i < len(l1) and j < len(l2):
            while i < len(l1) and l1[i] < l2[j]:
                res.append(l1[i])
                i += 1
            if i == len(l1):
                continue
            if l1[i] == l2[j]:
                i += 1
            res.append(l2[j])
            j += 1
        if i == len(l1):
            res += l2[j:]
        else:
            res += l1[i:]
        return res

    @property
    def var(self) -> List[TVar]:
        """Sorted type variables of this table."""
        return self.__var

    @property
    def cand(self) -> List[List[TypeSystem.T]]:
        """Candidate type rows (parallel to :attr:`var` column-wise)."""
        return self.__cand

    @property
    def f_const(self) -> List[List[FConst]]:
        """Per-row fold constraints."""
        return self.__f_const

    @property
    def f_var_l(self) -> List[List[FVar]]:
        """Per-row sorted fold-variable lists."""
        return self.__f_var_l

    def empty(self) -> bool:
        """True iff no type variable remains in this table."""
        return not self.__var

    def split(self) -> List[TConst]:
        """Split out variables whose candidate type is identical across all
        rows into their own single-variable tables; returns the fragments."""
        if len(self.__var) == 1:
            return [self]
        i: int = len(self.__var) - 1
        del_idx: List[int] = []
        frag: List[TConst] = []
        while i >= 0:
            split_flag: bool = True
            j: int = 1
            ref_t: TypeSystem.T = self.__cand[0][i]
            while j < len(self.__cand) and split_flag:
                split_flag &= (ref_t == self.__cand[j][i])
                j += 1
            if split_flag:
                del_idx.append(i)
                # NOTE(review): TConst.__init__ takes three arguments; this call
                # omits the f_const list and would raise TypeError if executed.
                # split() is currently only referenced from commented-out code —
                # confirm intended f_const before re-enabling.
                frag.append(TConst([self.__var[i]], [[self.__cand[0][i]]]))
            i -= 1
        if not frag:
            return [self]
        for idx in del_idx:
            del self.__var[idx]
            for it in self.__cand:
                del it[idx]
        return frag + [self] if self.__var else frag

    def drop(self, var: TVar) -> Optional[TVar]:
        """Remove ``var``'s column from this table if present.

        Returns ``var`` on success, None when the variable is not in the table.
        """
        i: int = 0
        while i < len(self.__var) and self.__var[i] < var:
            i += 1
        if i < len(self.__var) and var == self.__var[i]:
            # Debug tracing.
            print(f'drop {var.v} from constraint table')
            print('TARGET')
            print(' @var : ' + ', '.join([str(var.v) for var in self.__var]))
            for it in self.__cand:
                print(' @const: ' + ', '.join([str(t) for t in it]))
            print('')
            del self.__var[i]
            for it in self.__cand:
                del it[i]
            print('drop result')
            print(' @var : ' + ', '.join([str(var.v) for var in self.__var]))
            for it in self.__cand:
                print(' @const: ' + ', '.join([str(t) for t in it]))
            print('')
            return var
        else:
            return None
@final
class TChker:
    """Type checker: infers candidate types for an AST via constraint unification.

    Singleton (access through :meth:`inst`).  Walks the AST, allocating a type
    variable per node and registering :class:`TConst` tables that are unified
    incrementally by :meth:`__t_unify`.
    """

    __inst: TChker = None  # singleton instance

    def __init__(self) -> None:
        self.__expr: AST.AST = None          # AST currently being checked
        self.__t_env: Dict[int, TVar] = {}   # variable name -> type variable
        self.__t_const: List[TConst] = []    # live constraint tables

    def __new_f_var(self, cnt: int) -> List[FVar]:
        """Allocate ``cnt`` fresh fold variables."""
        return [FVar() for _ in range(cnt)]

    def __new_t_var(self) -> TVar:
        """Allocate a fresh type variable constrained to every candidate type
        (all base types plus a list of each base type)."""
        t_var: TVar = TVar()
        f_var: List[FVar] = self.__new_f_var(5)
        self.__t_const.append(TConst([t_var],
                                     [[TypeSystem.Real.inst()], [TypeSystem.Cmplx.inst()], [TypeSystem.Str.inst()],
                                      [TypeSystem.Bool.inst()], [TypeSystem.Void.inst()],
                                      [List2(TypeSystem.Real.inst(), f_var[0])],
                                      [List2(TypeSystem.Cmplx.inst(), f_var[1])],
                                      [List2(TypeSystem.Str.inst(), f_var[2])],
                                      [List2(TypeSystem.Bool.inst(), f_var[3])],
                                      [List2(TypeSystem.Void.inst(), f_var[4])]],
                                     [[], [], [], [], [], [FConst([f_var[0]], [None])], [FConst([f_var[1]], [None])],
                                      [FConst([f_var[2]], [None])], [FConst([f_var[3]], [None])],
                                      [FConst([f_var[4]], [None])]]))
        return t_var

    def __t_unify(self, new_const: TConst) -> None:
        """Unify ``new_const`` against all live tables.

        :raise TypeError: when unification reaches a contradiction.
        """
        new_const_l: List[TConst] = [new_const]
        unified: List[TConst] = []
        print('UNIFY')
        for i in range(len(self.__t_const)):
            del_idx: List[int] = []
            res: TConst = self.__t_const[i]
            j: int = 0
            while j < len(new_const_l):
                succ, tmp = res * new_const_l[j]
                if not succ:
                    raise TypeError
                if tmp:
                    del_idx.append(j)
                    res = tmp
                j += 1
            if del_idx:
                # Remove consumed tables back-to-front so indices stay valid.
                j = len(del_idx) - 1
                while j >= 0:
                    del new_const_l[del_idx[j]]
                    j -= 1
                # new_const_l += res.split()
                new_const_l += [res]
            else:
                unified.append(res)
        self.__t_const = unified + new_const_l

    # def __f_unify(self, new_const: FConst) -> None:

    def __subst(self, var1: TVar, var2: TVar) -> None:
        """Substitute ``var2`` by ``var1`` (requires ``var1 < var2``): equate
        them, drop ``var2`` from the tables, then alias its id."""
        assert var1 < var2
        # Fix: TConst.__init__ requires the per-row fold-constraint list as its
        # third argument; every row here is made of base types, so each row
        # carries no fold constraints (consistent with the other base-type
        # tables built in __chk_t_hlpr).
        self.__t_unify(TConst([var1, var2], [[TypeSystem.Real.inst(), TypeSystem.Real.inst()],
                                             [TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst()],
                                             [TypeSystem.Str.inst(), TypeSystem.Str.inst()],
                                             [TypeSystem.Bool.inst(), TypeSystem.Bool.inst()]],
                              [[], [], [], []]))
        for i in range(len(self.__t_const)):
            res: TVar = self.__t_const[i].drop(var2)
            if res:
                if self.__t_const[i].empty():
                    del self.__t_const[i]
                print(f'sub: {var2.v} -> {var1.v}')
                var2.v = var1.v
                return

    def __init(self) -> None:
        """Reset checker state before a run."""
        # NOTE(review): __cnt is assigned here but never read elsewhere in this
        # class — confirm whether it is dead state.
        self.__cnt = 0
        self.__t_const = []

    def __chk_t_hlpr(self, rt: Token.Tok):
        """Recursively assign type variables and register constraints for the
        subtree rooted at ``rt``."""
        tok_t: type = type(rt)
        if tok_t == Token.Num:
            # Numeric literal: complex or real according to its Python value.
            if type(rt.v) == complex:
                self.__t_unify(TConst([rt.t_var], [[TypeSystem.Cmplx.inst()]], [[]]))
            else:
                self.__t_unify(TConst([rt.t_var], [[TypeSystem.Real.inst()]], [[]]))
        elif tok_t == Token.Str:
            self.__t_unify(TConst([rt.t_var], [[TypeSystem.Str.inst()]], [[]]))
        elif tok_t == Token.Bool:
            self.__t_unify(TConst([rt.t_var], [[TypeSystem.Bool.inst()]], [[]]))
        elif tok_t == Token.Var:
            # Variable lookup/registration is disabled for now.
            pass
            # find: TVar = self.__t_env.get(rt.v)
            #
            # if find:
            #     self.__subst(find, rt.t_var)
            # else:
            #     self.__t_env[rt.v] = rt.t_var
        elif tok_t == Token.Op and rt.v == Binary.Add:
            # Binary addition: scalar/scalar, scalar/list (broadcast on either
            # side), and elementwise list/list with matching fold variables.
            rt.chd[0].t_var = self.__new_t_var()
            self.__chk_t_hlpr(rt.chd[0])
            rt.chd[1].t_var = self.__new_t_var()
            self.__chk_t_hlpr(rt.chd[1])
            f_var: List[FVar] = self.__new_f_var(12)
            self.__t_unify(TConst([rt.t_var, rt.chd[0].t_var, rt.chd[1].t_var],
                                  [[TypeSystem.Real.inst(), TypeSystem.Real.inst(), TypeSystem.Real.inst()],
                                   [TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Real.inst()],
                                   [TypeSystem.Cmplx.inst(), TypeSystem.Real.inst(), TypeSystem.Cmplx.inst()],
                                   [TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst()],
                                   [TypeSystem.Real.inst(), List2(TypeSystem.Real.inst(), f_var[0]),
                                    List2(TypeSystem.Real.inst(), f_var[0])],
                                   [TypeSystem.Real.inst(), List2(TypeSystem.Cmplx.inst(), f_var[1]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[1])],
                                   [TypeSystem.Cmplx.inst(), List2(TypeSystem.Real.inst(), f_var[2]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[2])],
                                   [TypeSystem.Cmplx.inst(), List2(TypeSystem.Cmplx.inst(), f_var[3]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[3])],
                                   [List2(TypeSystem.Real.inst(), f_var[4]), TypeSystem.Real.inst(),
                                    List2(TypeSystem.Real.inst(), f_var[4])],
                                   [List2(TypeSystem.Cmplx.inst(), f_var[5]), TypeSystem.Real.inst(),
                                    List2(TypeSystem.Cmplx.inst(), f_var[5])],
                                   [List2(TypeSystem.Real.inst(), f_var[6]), TypeSystem.Cmplx.inst(),
                                    List2(TypeSystem.Cmplx.inst(), f_var[6])],
                                   [List2(TypeSystem.Cmplx.inst(), f_var[7]), TypeSystem.Cmplx.inst(),
                                    List2(TypeSystem.Cmplx.inst(), f_var[7])],
                                   [List2(TypeSystem.Real.inst(), f_var[8]), List2(TypeSystem.Real.inst(), f_var[8]),
                                    List2(TypeSystem.Real.inst(), f_var[8])],
                                   [List2(TypeSystem.Cmplx.inst(), f_var[9]), List2(TypeSystem.Real.inst(), f_var[9]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[9])],
                                   [List2(TypeSystem.Real.inst(), f_var[10]), List2(TypeSystem.Cmplx.inst(), f_var[10]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[10])],
                                   [List2(TypeSystem.Cmplx.inst(), f_var[11]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[11]),
                                    List2(TypeSystem.Cmplx.inst(), f_var[11])]],
                                  [[], [], [], [], [FConst([f_var[0]], [None])],
                                   [FConst([f_var[1]], [None])], [FConst([f_var[2]], [None])],
                                   [FConst([f_var[3]], [None])], [FConst([f_var[4]], [None])],
                                   [FConst([f_var[5]], [None])], [FConst([f_var[6]], [None])],
                                   [FConst([f_var[7]], [None])], [FConst([f_var[8]], [None])],
                                   [FConst([f_var[9]], [None])], [FConst([f_var[10]], [None])],
                                   [FConst([f_var[11]], [None])]]))
        elif tok_t == Token.List:
            # List literal: all elements must share a common (promotable) type;
            # the list's fold constraint pins its length.
            for tok in rt.chd:
                tok.t_var = self.__new_t_var()
                self.__chk_t_hlpr(tok)
            if rt.argc == 0:
                f_var: FVar = FVar()
                self.__t_unify(TConst([rt.t_var], [[List2(TypeSystem.Void.inst(), f_var)]], [[FConst([f_var], [1])]]))
            elif rt.argc == 1:
                f_var: List[FVar] = self.__new_f_var(4)
                self.__t_unify(TConst([rt.t_var, rt.chd[0].t_var],
                                      [[List2(TypeSystem.Real.inst(), f_var[0]), TypeSystem.Real.inst()],
                                       [List2(TypeSystem.Cmplx.inst(), f_var[1]), TypeSystem.Cmplx.inst()],
                                       [List2(TypeSystem.Str.inst(), f_var[2]), TypeSystem.Str.inst()],
                                       [List2(TypeSystem.Bool.inst(), f_var[3]), TypeSystem.Bool.inst()]],
                                      [[FConst([f_var[0]], [1])], [FConst([f_var[1]], [1])], [FConst([f_var[2]], [1])],
                                       [FConst([f_var[3]], [1])]]))
            else:
                # Fold the children pairwise through dummy variables to compute
                # the promoted element type.
                dummy: TVar = self.__new_t_var()
                self.__t_unify(TConst([rt.chd[0].t_var, rt.chd[1].t_var, dummy],
                                      [[TypeSystem.Real.inst(), TypeSystem.Real.inst(), TypeSystem.Real.inst()],
                                       [TypeSystem.Real.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst()],
                                       [TypeSystem.Cmplx.inst(), TypeSystem.Real.inst(), TypeSystem.Cmplx.inst()],
                                       [TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst()],
                                       [TypeSystem.Str.inst(), TypeSystem.Str.inst(), TypeSystem.Str.inst()],
                                       [TypeSystem.Bool.inst(), TypeSystem.Bool.inst(), TypeSystem.Bool.inst()]],
                                      [[], [], [], [], [], []]))
                for i in range(len(rt.chd) - 2):
                    prev_dummy: TVar = dummy
                    dummy = self.__new_t_var()
                    self.__t_unify(TConst([rt.chd[0].t_var, rt.chd[i + 2].t_var, prev_dummy, dummy],
                                          [[TypeSystem.Real.inst(), TypeSystem.Real.inst(), TypeSystem.Real.inst(),
                                            TypeSystem.Real.inst()],
                                           [TypeSystem.Real.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Real.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Cmplx.inst(), TypeSystem.Real.inst(), TypeSystem.Real.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Real.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Real.inst(), TypeSystem.Real.inst(), TypeSystem.Cmplx.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Real.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Cmplx.inst(), TypeSystem.Real.inst(), TypeSystem.Cmplx.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(), TypeSystem.Cmplx.inst(),
                                            TypeSystem.Cmplx.inst()],
                                           [TypeSystem.Str.inst(), TypeSystem.Str.inst(), TypeSystem.Str.inst(),
                                            TypeSystem.Str.inst()],
                                           [TypeSystem.Bool.inst(), TypeSystem.Bool.inst(), TypeSystem.Bool.inst(),
                                            TypeSystem.Bool.inst()]], [[], [], [], [], [], [], [], [], [], []]))
                f_var: List[FVar] = self.__new_f_var(4)
                self.__t_unify(TConst([rt.t_var, dummy],
                                      [[List2(TypeSystem.Real.inst(), f_var[0]), TypeSystem.Real.inst()],
                                       [List2(TypeSystem.Cmplx.inst(), f_var[1]), TypeSystem.Cmplx.inst()],
                                       [List2(TypeSystem.Str.inst(), f_var[2]), TypeSystem.Str.inst()],
                                       [List2(TypeSystem.Bool.inst(), f_var[3]), TypeSystem.Bool.inst()]],
                                      [[FConst([f_var[0]], [None])], [FConst([f_var[1]], [None])],
                                       [FConst([f_var[2]], [None])], [FConst([f_var[3]], [None])]]))
        else:
            raise NotImplementedError

    def __find_t(self, var: TVar) -> Set[TypeSystem.T]:
        """Return the candidate type set of ``var`` from the first table that
        knows it (implicitly None when no table does)."""
        for const in self.__t_const:
            find: Set[TypeSystem.T] = const[var]
            if find:
                return find

    @classmethod
    def inst(cls) -> TChker:
        """Return the singleton instance, creating it on first use."""
        if not cls.__inst:
            cls.__inst = TChker()
        return cls.__inst

    def chk_t(self, expr: AST.AST):
        """Run type checking on ``expr`` and report the inferred root types to
        the debug buffer."""
        self.__expr = expr
        buf: Type.BufT = Type.BufT.DEBUG  # Debug buffer.
        Printer.Printer.inst().buf(Printer.Printer.inst().f_title('type checking target'), buf)
        Printer.Printer.inst().buf(f'@AST: {expr}', buf, indent=2)
        Printer.Printer.inst().buf_newline(buf)
        Printer.Printer.inst().buf(Printer.Printer.inst().f_title('type checking chain'), buf)
        Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Initializing type checker'), buf, False, 2)
        self.__init()
        Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
        Printer.Printer.inst().buf(f'@__const_l: {len(self.__t_const)} (cleared)', buf, indent=4)
        Printer.Printer.inst().buf_newline(buf)
        Printer.Printer.inst().buf(Printer.Printer.inst().f_prog('Running type checker'), buf, False, 2)
        self.__expr.rt.t_var = self.__new_t_var()
        self.__chk_t_hlpr(self.__expr.rt)
        Printer.Printer.inst().buf(Printer.Printer.inst().f_col('done', Type.Col.BLUE), buf)
        Printer.Printer.inst().buf(f'@AST : {expr}', buf, indent=4)
        tmp: str = ' or '.join([str(t) for t in self.__find_t(expr.rt.t_var)])
        Printer.Printer.inst().buf(f'@inferred: {tmp}', buf, indent=4)
        Printer.Printer.inst().buf_newline(buf)
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,550 | eik4862/TinyCalculator | refs/heads/master | /Operator/__init__.py | __all__ = ['Operator', 'Binary', 'Unary', 'Bool', 'Compare', 'Assign', 'Delimiter']
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,551 | eik4862/TinyCalculator | refs/heads/master | /Function/Gamma.py | from typing import final
from Function import Function
class GammaFun(Function.Fun):
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@final
class Gamma(GammaFun):
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@final
class LogGamma(GammaFun):
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@final
class Beta(GammaFun):
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,552 | eik4862/TinyCalculator | refs/heads/master | /Operator/Binary.py | from __future__ import annotations
from typing import final, Final, Tuple, Dict, Optional, List
from Core import Token, TypeSystem
from Operator import Operator
class BinOp(Operator.Op):
__ARGC: Final[int] = 2
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def argc(cls) -> int:
return cls.__ARGC
@classmethod
def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
t1: TypeSystem.T = rt.chd[0].t
t2: TypeSystem.T = rt.chd[1].t
if t1.base:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if type(res_t) in [TypeSystem.Real, TypeSystem.Cmplx, TypeSystem.Sym]:
rt.t = res_t
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2.chd_t)
if type(res_t) in [TypeSystem.Real, TypeSystem.Cmplx, TypeSystem.Sym]:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t2, res_t)
else:
return None
else:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1.chd_t, t2)
if type(res_t) in [TypeSystem.Real, TypeSystem.Cmplx, TypeSystem.Sym]:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, res_t)
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if res_t and type(res_t.chd_t) in [TypeSystem.Real, TypeSystem.Cmplx]:
rt.t = res_t
else:
return None
return t_env
@final
class Add(BinOp):
__PRECD: Final[Tuple[int, int]] = (12, 11)
__SYM: Final[str] = '+'
__SGN: Final[List[str]] = ['Real + Real -> Real',
'Cmplx + Cmplx -> Cmplx',
'Sym + Sym -> Sym',
'Real + List of Real (n fold) -> List of Real (n fold)',
'Cmplx + List of Cmplx (n fold) -> List of Cmplx (n fold)',
'List of Real (n fold) + Real -> List of Real (n fold)',
'List of Cmplx (n fold) + Cmplx -> List of Cmplx (n fold)',
'List of Real (n fold) + List of Real (n fold) -> List of Real (n fold)',
'List of Cmplx (n fold) + List of Cmplx (n fold) -> List of Cmplx (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@final
class Sub(BinOp):
__PRECD: Final[Tuple[int, int]] = (12, 11)
__SYM: Final[str] = '-'
__SGN: Final[List[str]] = ['Real - Real -> Real',
'Cmplx - Cmplx -> Cmplx',
'Sym - Sym -> Sym',
'Real - List of Real (n fold) -> List of Real (n fold)',
'Cmplx - List of Cmplx (n fold) -> List of Cmplx (n fold)',
'List of Real (n fold) - Real -> List of Real (n fold)',
'List of Cmplx (n fold) - Cmplx -> List of Cmplx (n fold)',
'List of Real (n fold) - List of Real (n fold) -> List of Real (n fold)',
'List of Cmplx (n fold) - List of Cmplx (n fold) -> List of Cmplx (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@final
class Mul(BinOp):
__PRECD: Final[Tuple[int, int]] = (14, 13)
__SYM: Final[str] = '*'
__SGN: Final[List[str]] = ['Real * Real -> Real',
'Cmplx * Cmplx -> Cmplx',
'Sym * Sym -> Sym',
'Real * List of Real (n fold) -> List of Real (n fold)',
'Cmplx * List of Cmplx (n fold) -> List of Cmplx (n fold)',
'List of Real (n fold) * Real -> List of Real (n fold)',
'List of Cmplx (n fold) * Cmplx -> List of Cmplx (n fold)',
'List of Real (n fold) * List of Real (n fold) -> List of Real (n fold)',
'List of Cmplx (n fold) * List of Cmplx (n fold) -> List of Cmplx (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@final
class MatMul(BinOp):
__PRECD: Final[Tuple[int, int]] = (14, 13)
__SYM: Final[str] = '%*%'
__SGN: Final[List[str]] = ['Sym %*% Sym -> Sym',
'List of Real (2 fold) %*% List of Real (n fold) -> List of Real (n fold) given that n >= 2',
'List of Cmplx (2 fold) %*% List of Cmplx (n fold) -> List of Cmplx (n fold) given that n >= 2',
'List of Real (n fold) %*% List of Real (2 fold) -> List of Real (n fold) given that n > 2',
'List of Cmplx (n fold) %*% List of Cmplx (2 fold) -> List of Cmplx (n fold) given that n > 2',
'List of Real (n fold) %*% List of Real (n fold) -> List of Real (n fold) given that n > 2',
'List of Cmplx (n fold) %*% List of Cmplx (n fold) -> List of Cmplx (n fold) given that n > 2']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@classmethod
def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
t1: TypeSystem.T = rt.chd[0].t
t2: TypeSystem.T = rt.chd[1].t
if t1.base:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if type(res_t) == TypeSystem.Sym:
rt.t = res_t
else:
return None
else:
if type(t1) == TypeSystem.Sym:
rt.t = t1
else:
return None
else:
if t2.base:
if type(t2) == TypeSystem.Sym:
rt.t = t2
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if res_t and type(res_t.chd_t) in [TypeSystem.Real, TypeSystem.Cmplx]:
if t1.fold > 1:
rt.t = res_t
else:
return None
else:
res_t = TypeSystem.T.supt(t1.chd_t, t2.chd_t)
if type(res_t) in [TypeSystem.Real, TypeSystem.Cmplx]:
if t1.fold == 2 and t2.fold > 2:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t2, res_t)
elif t1.fold > 2 and t2.fold == 2:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, res_t)
else:
return None
else:
return None
return t_env
@final
class Div(BinOp):
__PRECD: Final[Tuple[int, int]] = (14, 13)
__SYM: Final[str] = '/'
__SGN: Final[List[str]] = ['Real / Real -> Real',
'Cmplx / Cmplx -> Cmplx',
'Sym / Sym -> Sym',
'Real / List of Real (n fold) -> List of Real (n fold)',
'Cmplx / List of Cmplx (n fold) -> List of Cmplx (n fold)',
'List of Real (n fold) / Real -> List of Real (n fold)',
'List of Cmplx (n fold) / Cmplx -> List of Cmplx (n fold)',
'List of Real (n fold) / List of Real (n fold) -> List of Real (n fold)',
'List of Cmplx (n fold) / List of Cmplx (n fold) -> List of Cmplx (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@final
class Rem(BinOp):
__PRECD: Final[Tuple[int, int]] = (14, 13)
__SYM: Final[str] = '%'
__SGN: Final[List[str]] = ['Real % Real -> Real',
'Sym % Sym -> Sym',
'Real % List of Real (n fold) -> List of Real (n fold)',
'List of Real (n fold) % Real -> List of Real (n fold)',
'List of Real (n fold) % List of Real (n fold) -> List of Real (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@classmethod
def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
t1: TypeSystem.T = rt.chd[0].t
t2: TypeSystem.T = rt.chd[1].t
if t1.base:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if type(res_t) in [TypeSystem.Real, TypeSystem.Sym]:
rt.t = res_t
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2.chd_t)
if type(res_t) in [TypeSystem.Real, TypeSystem.Sym]:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t2, res_t)
else:
return None
else:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1.chd_t, t2)
if type(res_t) in [TypeSystem.Real, TypeSystem.Sym]:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, res_t)
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if res_t and type(res_t.chd_t) == TypeSystem.Real:
rt.t = res_t
else:
return None
return t_env
@final
class Quot(BinOp):
__PRECD: Final[Tuple[int, int]] = (14, 13)
__SYM: Final[str] = '//'
__SGN: Final[List[str]] = ['Real // Real -> Real',
'Sym // Sym -> Sym',
'Real // List of Real (n fold) -> List of Real (n fold)',
'List of Real (n fold) // Real -> List of Real (n fold)',
'List of Real (n fold) // List of Real (n fold) -> List of Real (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
@classmethod
def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
t1: TypeSystem.T = rt.chd[0].t
t2: TypeSystem.T = rt.chd[1].t
if t1.base:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if type(res_t) in [TypeSystem.Real, TypeSystem.Sym]:
rt.t = res_t
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2.chd_t)
if type(res_t) in [TypeSystem.Real, TypeSystem.Sym]:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t2, res_t)
else:
return None
else:
if t2.base:
res_t: TypeSystem.T = TypeSystem.T.supt(t1.chd_t, t2)
if type(res_t) in [TypeSystem.Real, TypeSystem.Sym]:
rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, res_t)
else:
return None
else:
res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
if res_t and type(res_t.chd_t) == TypeSystem.Real:
rt.t = res_t
else:
return None
return t_env
@final
class Pow(BinOp):
__PRECD: Final[Tuple[int, int]] = (17, 18)
__SYM: Final[str] = '**'
__SGN: Final[List[str]] = ['Real ** Real -> Real',
'Cmplx ** Cmplx -> Cmplx',
'Sym ** Sym -> Sym',
'Real ** List of Real (n fold) -> List of Real (n fold)',
'Cmplx ** List of Cmplx (n fold) -> List of Cmplx (n fold)',
'List of Real (n fold) ** Real -> List of Real (n fold)',
'List of Cmplx (n fold) ** Cmplx -> List of Cmplx (n fold)',
'List of Real (n fold) ** List of Real (n fold) -> List of Real (n fold)',
'List of Cmplx (n fold) ** List of Cmplx (n fold) -> List of Cmplx (n fold)']
def __new__(cls, *args, **kwargs) -> None:
raise NotImplementedError
@classmethod
def precd_in(cls) -> int:
return cls.__PRECD[0]
@classmethod
def precd_out(cls) -> int:
return cls.__PRECD[1]
@classmethod
def sym(cls) -> str:
return cls.__SYM
@classmethod
def sgn(cls) -> List[str]:
return cls.__SGN
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,553 | eik4862/TinyCalculator | refs/heads/master | /Error/Error.py | from __future__ import annotations
from typing import final, Dict, Any, List
from Core import Type, TypeSystem
class Err(Exception):
"""
Base error class.
Do not remove this.
This is used to catch all user-defined errors at once.
"""
def __init__(self, errno: int) -> None:
self.__errno: int = errno
@property
def errno(self) -> int:
return self.__errno
class ParserErr(Err):
def __init__(self, errno: int) -> None:
super().__init__(errno)
class InterpErr(Err):
def __init__(self, errno: int, line: str, pos: int) -> None:
super().__init__(errno)
self.__line: str = line
self.__pos: int = pos
@property
def line(self) -> str:
return self.__line
@property
def pos(self) -> int:
return self.__pos
@final
class SysErr(Err):
"""
System error class.
:ivar __err_type: Error type.
:ivar __extra_info: Extra information.
"""
def __init__(self, err_t: Type.SysErrT, **kwargs: Any) -> None:
super().__init__()
self.__err_t = err_t
self.__extra_info: Dict[str, Any] = kwargs
def __del__(self) -> None:
pass
@property
def err_t(self) -> Type.SysErrT:
"""
Getter for system error type.
:return: System error type.
:rtype: Type.SysErrT
"""
return self.__err_t
@property
def sig(self) -> str:
"""
Getter for signal name at which registering or unregistering handler failed.
:return: Signal name which caused error. None if this information is not given.
:rtype: str
"""
return self.__extra_info.get('sig')
@property
def err_str(self) -> str:
"""
Getter for error string from OS error.
:return: OS error string. None if this information is not given.
:rtype: str
"""
return self.__extra_info.get('err_str')
@property
def iter(self) -> int:
"""
Getter for the # of iteration until timeout.
:return: The # of iteration. None if this information is not given.
:rtype: int
"""
return self.__extra_info.get('iter')
@property
def err_no(self) -> int:
"""
Getter for error code.
:return: Error code. None if this information is not given.
:rtype: int
"""
return self.__extra_info.get('err_no')
@iter.setter
def iter(self, iter: int) -> None:
"""
Setter for the # of iteration until timeout.
:param iter: The # of iteration to set.
:type iter: int
"""
self.__extra_info['iter'] = iter
@err_no.setter
def err_no(self, err_no: int) -> None:
"""
Setter for error code.
:param err_no: Error code to set.
:type err_no: int
"""
self.__extra_info['err_no'] = err_no
@final
class DBErr(Err):
"""
DB error class.
:ivar __err_t: Error type.
:ivar __path: Path of source file where error occurred.
:ivar __err_str: Error string from OS error. (Default: None)
"""
def __init__(self, err_t: Type.DBErrT, path: str, err_str: str = None) -> None:
super().__init__()
self.__err_t: Type.DBErrT = err_t
self.__path: str = path
self.__err_str: str = err_str
def __del__(self) -> None:
pass
@property
def err_t(self) -> Type.DBErrT:
"""
Getter for DB error type.
:return: DB error type.
:rtype: Type.DBErrT
"""
return self.__err_t
@property
def path(self) -> str:
"""
Getter for path of source file where error occurred.
:return: Source file path where error occurred.
:rtype: str
"""
return self.__path
@property
def err_str(self) -> str:
"""
Getter for error string from OS error.
:return: OS error string. None if this information is not given.
:rtype: str
"""
return self.__err_str
#
# @final
# class InterpErr(Err):
# """
# Interpreter error class.
#
# :ivar __err_type: Error type.
# :ivar __err_code: Error code.
# :ivar __line: Raw input which caused error.
# :ivar __pos: Position in raw input where error occurred.
# :ivar __extra_info: Extra information.
# """
#
# def __init__(self, err_t: Type.InterpErrT, err_no: int, line: str, pos: int, **kwargs: Any) -> None:
# super().__init__()
# self.__err_t: Type.InterpErrT = err_t
# self.__err_no: int = err_no
# self.__line: str = line
# self.__pos: int = pos
# self.__extra_info: Dict[str, Any] = kwargs
#
# def __del__(self) -> None:
# pass
#
# @property
# def err_t(self) -> Type.InterpErrT:
# """
# Getter for interpreter error type.
#
# :return: Interpreter error type.
# :rtype: Type.InterpErrT
# """
# return self.__err_t
#
# @property
# def err_no(self) -> int:
# """
# Getter for error code.
#
# :return: Error code.
# :rtype: int
# """
# return self.__err_no
#
# @property
# def line(self) -> str:
# """
# Getter for raw input which caused error.
#
# :return: Erroneous raw input.
# :rtype: str
# """
# return self.__line
#
# @property
# def pos(self) -> int:
# """
# Getter for position in raw input where error occurred.
#
# :return: Position where error occurred.
# :rtype: int
# """
# return self.__pos
#
# @property
# def wrong_t(self) -> TypeSystem.T:
# """
# Getter for erroneous inferred type.
#
# :return: Erroneous inferred type.
# :rtype: TypeSystem.T
# """
# return self.__extra_info.get('wrong_t')
#
# @property
# def right_t(self) -> TypeSystem.T:
# """
# Getter for correct type.
#
# :return: Correct type.
# :rtype: TypeSystem.T
# """
# return self.__extra_info.get('right_t')
#
@final
class UtilErr(Err):
"""
Utility command error class.
:ivar __err_type: Error type.
:ivar __err_code: Error code.
:ivar __extra_info: Extra information.
"""
def __init__(self, err_t: Type.UtilErrT, err_no: int = 0, **kwargs: Any) -> None:
super().__init__()
self.__err_t: Type.UtilErrT = err_t
self.__err_no: int = err_no
self.__extra_info: Dict[str, Any] = kwargs
def __del__(self) -> None:
pass
@property
def t(self) -> Type.UtilErrT:
"""
Getter for utility command error type.
:return: Utility command error type.
:rtype: Type.UtilErrT
"""
return self.__err_t
@property
def id(self) -> str:
"""
Getter for erroneous system variable id.
:return: Erroneous system variable id.
:rtype: str
"""
return self.__extra_info.get('id')
@property
def err_no(self) -> int:
"""
Getter for error code.
:return: Error code.
:rtype: int
"""
return self.__err_no
@property
def wrong_t(self):
return self.__extra_info.get('wrong_t')
@property
def correct_t(self):
return self.__extra_info.get('correct_t')
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,554 | eik4862/TinyCalculator | refs/heads/master | /Error/__init__.py | __all__ = ['Error', 'ParserError', 'InterpreterError']
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,555 | eik4862/TinyCalculator | refs/heads/master | /Util/Macro.py | from sys import float_info
"""
Simple macros.
"""
def is_white(c: str) -> bool:
"""
Check whether input character is whitespace or not.
Here, whitespace includes space, tab, and newline characters.
:param c: Character to be checked.
:type c: str
:return: True if the input character is whitespace. False otherwise.
:rtype: bool
"""
return c in [' ', '\t', '\n']
def is_digit(c: str) -> bool:
"""
Check whether input character is digit or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is digit. False otherwise.
:rtype: bool
"""
return '0' <= c <= '9'
def is_dot(c: str) -> bool:
"""
Check whether input character is decimal point or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is decimal point. False otherwise.
:rtype: bool
"""
return c == '.'
def is_alpha(c: str) -> bool:
"""
Check whether input character is alphabet or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is alphabet. False otherwise.
:rtype: bool
"""
return 'a' <= c <= 'z' or 'A' <= c <= 'Z'
def is_underscore(c: str) -> bool:
"""
Check whether input character is underscore or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is underscore. False otherwise.
:rtype: bool
"""
return c == '_'
def is_quote(c: str) -> bool:
"""
Check whether input character is (double) quote or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is quote. False otherwise.
:rtype: bool
"""
return c == '"'
def is_comment(c: str) -> bool:
"""
Check whether input character is comment delimiter # or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is comment delimiter. False otherwise.
:rtype: bool
"""
return c == '#'
def is_newline(c: str) -> bool:
"""
Check whether input character is newline character or not.
:param c: Character to be checked.
:type c: str
:return: True if the input character is newline character. False otherwise.
:rtype: bool
"""
return c == '\n'
def is_tag(c: str) -> bool:
"""
Check whether input character is tag delimiter $ or not.
Here, whitespace includes space, tab, and newline characters.
:param c: Character to be checked.
:type c: str
:return: True if the input character is tag delimiter. False otherwise.
:rtype: bool
"""
return c == '$'
def is_bigint(n: int) -> bool:
"""
Check whether input integer is so-called big integer which is too big to be casted to float.
:param n: Integer to be checked.
:type n: int
:return: True if it is big integer which is too big. False otherwise.
:rtype: bool
"""
return isinstance(n, int) and n > float_info.max
def is_smallint(n: int) -> bool:
"""
Check whether input integer is so-called big integer which is too small to be casted to float.
:param n: Integer to be checked.
:type n: int
:return: True if it is big integer which is too small. False otherwise.
:rtype: bool
"""
return isinstance(n, int) and n < -float_info.max
def is_int(x: float) -> bool:
return x % 1 == 0
def is_exp(c: str) -> bool:
return c == 'e'
def is_sgn(c: str) -> bool:
return c in ['-', '+']
def is_imag(c: str) -> bool:
return c == 'j'
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,556 | eik4862/TinyCalculator | refs/heads/master | /Core/Polynomial.py | from typing import final, List, Tuple
from functools import reduce
class Poly:
def __init__(self) -> None:
pass
def __del__(self) -> None:
pass
@final
class NilPoly(Poly):
def __init__(self, var: int):
super().__init__()
self.__var: int = var
@final
class UniPoly(Poly):
def __init__(self, var: int, coef: List[int]) -> None:
super().__init__()
self.__var: int = var
self.__coef: List[int] = coef
self.__deg: int = len(self.__coef) - 1
def __del__(self) -> None:
pass
@final
class UniSparPoly(Poly):
def __init__(self, var: int, coef: List[Tuple[int, int]]) -> None:
super().__init__()
self.__var: int = var
self.__coef: List[Tuple[int, int]] = coef
self.__deg: int = coef[-1][1]
def __del__(self) -> None:
pass
@final
class MultiPoly(Poly):
def __init__(self, var: List[int], coef: list, deg: int) -> None:
super().__init__()
self.__var: List[int] = var
self.__coef: list = coef
self.__deg: int = deg
def __del__(self) -> None:
pass
@final
class MultiSparPoly(Poly):
def __init__(self, var: List[int], coef: List[int]) -> None:
super().__init__()
self.__var: int = var
self.__coef: List[Tuple[int, List[int]]] = coef
self.__deg: int = max(deg)
def __del__(self) -> None:
pass
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,557 | eik4862/TinyCalculator | refs/heads/master | /Function/Integer.py | from typing import final
from Function import Function
class IntFun(Function.Fun):
    """Common base for integer-related function handles.

    Acts purely as a namespace/tag type; it must never be instantiated.
    """

    def __new__(cls, *args, **kwargs) -> None:
        """Reject any attempt to construct an instance."""
        raise NotImplementedError()
@final
class Ceil(IntFun):
    """Handle for the ceiling function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
@final
class Floor(IntFun):
    """Handle for the floor function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
@final
class Round(IntFun):
    """Handle for the rounding function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
@final
class IntPart(IntFun):
    """Handle for the integer-part function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
# NOTE: A second, byte-identical definition of ``IntPart`` previously appeared
# here. It only re-bound the name to an equivalent class, shadowing the
# definition above, so the dead duplicate has been removed.
@final
class FracPart(IntFun):
    """Handle for the fractional-part function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
@final
class Abs(IntFun):
    """Handle for the absolute-value function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
@final
class Sgn(IntFun):
    """Handle for the sign function; a non-instantiable marker class."""

    def __new__(cls, *args, **kwargs) -> None:
        """Reject instantiation unconditionally."""
        raise NotImplementedError()
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,558 | eik4862/TinyCalculator | refs/heads/master | /Function/SpecialFunction.py | from typing import Dict, List, final
from Core import Type
@final
class SpecialFun:
"""
Special function toolbox.
:cvar __sign: Signatures of special functions.
"""
def __new__(cls) -> None:
    """Prevent instantiation: ``SpecialFun`` is a static toolbox class."""
    raise NotImplementedError
#
#
# @classmethod
# def __erf(cls, x: float) -> float:
# """
# Error function.
#
# Error function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is +-1, resp.
# 3. If x is finite, the result is ``math.erf(x)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where error function is to be computed.
# :type x: float
#
# :return: Computed value of error function.
# :rtype: float
# """
# return math.erf(x)
#
# @classmethod
# def __erfc(cls, x: float) -> float:
# """
# Complementary error function.
#
# Complementary error function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is 0, 2, resp.
# 3. If x is finite, the result is ``math.erfc(x)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where complementary error function is to be computed.
# :type x: float
#
# :return: Computed value of complementary error function.
# :rtype: float
# """
# return math.erfc(x)
#
# @classmethod
# def __gamma(cls, x: float) -> float:
# """
# Gamma function.
#
# Gamma function with parameter x has following computation rules.
# 1. If x is nan or -inf, the result is nan.
# 2. If x is +inf, the result is +inf.
# 3. If x is finite nonpositive integer, the result is nan.
# 4. If x is finite, the result is ``math.gamma(x)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where gamma function is to be computed.
# :type x: float
#
# :return: Computed value of gamma function.
# :rtype: float
# """
# if x == -math.inf or (is_int(x) and x <= 0):
# return math.nan
# else:
# try:
# return math.gamma(x)
# except OverflowError:
# return math.inf
#
# @classmethod
# def __lgamma(cls, x: float) -> float:
# """
# Log gamma function.
#
# Log gamma function with parameter x has following computation rules.
# 1. If x is nan or -inf, the result is nan.
# 2. If x is +inf, the result is +inf.
# 3. If x is finite nonpositive integer, the result if nan.
# 4. If x is finite, the result is ``math.lgamma(x)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where log gamma function is to be computed.
# :type x: float
#
# :return: Computed value of log gamma function.
# :rtype: float
# """
# return math.nan if x == -math.inf or (is_int(x) and x <= 0) else math.lgamma(x)
#
# @classmethod
# def __recigamma(cls, x: float) -> float:
# """
# Reciprocal gamma function.
#
# Reciprocal gamma function with parameter x has following computation rules.
# 1. If x is nan or -inf, the result is nan.
# 2. If x is +inf, the result is 0.
# 3. If x is finite, the result is ``1 / math.gamma(x)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where reciprocal gamma function is to be computed.
# :type x: float
#
# :return: Computed value of reciprocal gamma function.
# :rtype: float
# """
# if math.isinf(x):
# return math.nan if x < 0 else 0
# elif is_int(x) and x <= 0:
# return 0
# else:
# try:
# return 1 / math.gamma(x)
# except OverflowError:
# return 0
#
# @classmethod
# def __bessel_clifford(cls, x: float) -> float:
# """
# Bessel-Clifford function.
#
# Bessel-Clifford function with parameter x has following computation rules.
# 1. If x is nan or -inf, the result is nan.
# 2. If x is +inf, the result is 0.
# 3. If x is finite, the result is ``1 / math.gamma(x + 1)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where Bessel-Clifford function is to be computed.
# :type x: float
#
# :return: Computed value of Bessel-Clifford function.
# :rtype: float
# """
# if math.isinf(x):
# return math.nan if x < 0 else 0
# elif is_int(x) and x < 0:
# return 0
# else:
# try:
# return 1 / math.gamma(x + 1)
# except OverflowError:
# return 0
#
# @classmethod
# def __beta(cls, x: float, y: float) -> float:
# """
# Beta function.
#
# Beta function with parameter x and y has following computation rules.
# 1. If x or y is nan, -inf, or finite nonpositive integer, the result is nan.
# 2. If x and y are both inf, the result is 0.
# 3. If x is inf and y is either finite positive or in (2n, 2n+1) for some finite negative integer n, the
# result is inf.
# 4. If x is inf and y is in (2n-1, 2n) for some finite nonpositive integer n, the result is -inf.
# 5. If y is inf and x is either finite positive or in (2n, 2n+1) for some finite negative integer n, the
# result is inf.
# 6. If y is inf and x is in (2n-1, 2n) for some finite nonpositive integer n, the result is -inf.
# 7. If x and y are both either finite positive or finite negative noninteger, the result is
# ``math.exp(math.lgamma(x) + math.lgamma(y) - math.lgamma(x + y))`` multiplied by proper sign.
# Here, rule 7 is based on the identity ``B(x, y) = gamma(x) * gamma(y) / gamma(x + y)``.
# Since this identity is vulnerable to overflow, we take logarithm and take exponential again.
# Before taking logarithm, we must take absolute value first and the lost sign information can be recovered
# latter using the fact that ``gamma(x)`` is positive iff x is finite positive or in (2n, 2n+1) for some finite
# negative integer n.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: First coordinate of point where beta function is to be computed.
# :type x: float
# :param y: Second coordinate of point where beta function is to be computed.
# :type y: float
#
# :return: Computed value of beta function.
# :rtype: float
# """
# if ((is_int(x) or math.isinf(x)) and x <= 0) or ((is_int(y) or math.isinf(y)) and y <= 0):
# return math.nan
# elif math.isinf(x):
# return 0 if math.isinf(y) else math.inf if y > 0 or math.ceil(y) % 2 == 1 else -math.inf
# elif math.isinf(y):
# return math.inf if x > 0 or math.ceil(x) % 2 == 1 else -math.inf
# elif is_int(x + y) and (x + y) <= 0:
# return 0
# else:
# sgn = 1
#
# if x < 0 and math.ceil(x) % 2 == 0:
# sgn *= -1
#
# if y < 0 and math.ceil(y) % 2 == 0:
# sgn *= -1
#
# if x + y < 0 and math.ceil(x + y) % 2 == 0:
# sgn *= -1
#
# try:
# return sgn * math.exp(math.lgamma(x) + math.lgamma(y) - math.lgamma(x + y))
# except OverflowError:
# return sgn * math.inf
#
# @classmethod
# def __cenbeta(cls, x: float) -> float:
# """
# Central beta function.
#
# Central beta function with parameter x has following computation rules.
# 1. If x is nan or -inf, the result is nan.
# 2. If x is +inf, the result is 0.
# 3. If x is finite nonpositive integer, the result is nan.
# 4. If x is either finite positive or finite negative nonintger, the result is
# ``math.exp(math.lgamma(x) * 2 - math.lgamma(2 * x))``.
# Here, rule 4 is based on the identity ``B(x, x) = gamma(x)^2 / gamma(2x)``.
# Since this identity is vulnerable to overflow, we take logarithm and take exponential again.
# Before taking logarithm, we must take absolute value first and the lost sign information can be recovered
# latter using the fact that ``gamma(x)`` is positive iff x is finite positive or in (2n, 2n+1) for some finite
# negative integer n.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where central beta function is to be computed.
# :type x: float
#
# :return: Computed value of central beta function.
# :rtype: float
# """
# if x == math.inf:
# return 0
# elif is_int(x) and x <= 0:
# return math.nan
    #         elif is_int(2 * x) and x <= 0:
# return 0
# else:
# sgn = 1 if x > 0 or x % 1 < 0.5 else -1
#
# try:
# return sgn * math.exp(math.lgamma(x) * 2 - math.lgamma(2 * x))
# except OverflowError:
# return sgn * math.inf
#
# @classmethod
# def __sinc(cls, x: float) -> float:
# """
# Sinc function.
#
# Sinc function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is 0.
# 3. If x is 0, the result is 1.
# 4. If x is finite nonzero, the result is ``math.sin(x) / x``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where sinc function is to be computed.
# :type x: float
#
# :return: Computed value of sinc function.
# :rtype: float
# """
# return 1 if x == 0 else math.sin(x) / x
#
# @classmethod
# def __tanc(cls, x: float) -> float:
# """
# Tanc function.
#
# Tanc function with parameter x has following computation rules.
# 1. If x is nan or +-inf, the result is nan.
# 2. If x is finite integer multiple of pi + pi/2, the result is nan.
# 3. If x is 0, the result is 1.
# 4. If x is finite nonzero and not integer multiple of pi + pi/2, the result is ``math.tan(x) / x``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where tanc function is to be computed.
# :type x: float
#
# :return: Computed value of tanc function.
# :rtype: float
# """
# return math.nan if math.isinf(x) or (x - math.pi / 2) % math.pi == 0 else 0 if x == 0 else math.tan(x) / x
#
# @classmethod
# def __sinhc(cls, x: float) -> float:
# """
# Sinhc function.
#
# Sinhc function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is inf.
# 3. If x is 0, the result is 1.
# 4. If x is finite nonzero, the result is ``math.sinh(x) / x``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where sinhc function is to be computed.
# :type x: float
#
# :return: Computed value of sinhc function.
# :rtype: float
# """
# try:
# return 1 if x == 0 else math.sinh(x) / x
# except OverflowError:
# return math.inf
#
# @classmethod
# def __coshc(cls, x: float) -> float:
# """
# Coshc function.
#
# Coshc function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
# 2. If x is +-inf, the result is +-inf, resp.
# 3. If x is 0, the result is nan.
# 4. If x is finite nonzero, the result is ``math.cosh(x) / x``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where coshc function is to be computed.
# :type x: float
#
# :return: Computed value of coshc function.
# :rtype: float
# """
# try:
# return math.nan if x == 0 else math.cosh(x) / x
# except OverflowError:
# return math.inf if x > 0 else -math.inf
#
# @classmethod
# def __tanhc(cls, x: float) -> float:
# """
# Tanhc function.
#
# Tanhc function with parameter x has following computation rules.
# 1. If x is nan, the result is nan.
    #     2. If x is +-inf, the result is 0.
# 3. If x is 0, the result is 1.
# 4. If x is finite nonzero, the result is ``math.tanh(x) / x``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where tanhc function is to be computed.
# :type x: float
#
# :return: Computed value of tanhc function.
# :rtype: float
# """
# return 1 if x == 0 else math.tanh(x) / x
#
# @classmethod
# def __dirichlet_ker(cls, x: float, n: int) -> float:
# """
# Dirichlet kernel function.
#
# Dirichlet kernel function with parameter x and n has following computation rules.
# 1. If x is nan or +-inf, the result is nan.
# 2. If n is nan or +-inf, the result is nan.
# 3. If n is not nonnegative integer, the result is nan.
    #     4. If n is 0, the result is 1.
# 5. If x is finite and n is finite positive integer, the result is
# ``math.sin((n + 0.5) * x) / math.sin(x / 2)``.
# Here, rule 4 is based on the identity ``D_n(x) = sin((n + 1 / 2)x) / sin(x / 2)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where Dirichlet kernel function is to be computed.
# :type x: float
#
# :return: Computed value of Dirichlet kernel function.
# :rtype: float
# """
# if not math.isfinite(x + n) or n < 0 or not is_int(n):
# return math.nan
# else:
# return 1 if n == 0 else 2 * n + 1 if x % (2 * math.pi) == 0 else math.sin((n + 0.5) * x) / math.sin(x / 2)
#
# @classmethod
# def __fejer_ker(cls, x: float, n: int) -> float:
# """
# Fejer kernel function.
#
# Fejer kernel function with parameter x and n has following computation rules.
# 1. If x is nan or +-inf, the result is nan.
# 2. If n is nan or +-inf, the result is nan.
# 3. If n is not positive integer, the result is nan.
# 4. If n is 1, the result is 1.
# 5. If x is finite and n is finite positive integer, the result is
# ``(1 - math.cos(n * x)) / (1 - math.cos(x)) / n``.
    #     Here, rule 5 is based on the identity ``F_n(x) = (1 - cos(n * x)) / (n * (1 - cos(x)))``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where Fejer kernel function is to be computed.
# :type x: float
#
# :return: Computed value of Fejer kernel function.
# :rtype: float
# """
# if not math.isfinite(x + n) or n <= 0:
# return math.nan
# else:
# return 1 if n == 1 else n if x % (2 * math.pi) == 0 else (1 - math.cos(n * x)) / (1 - math.cos(x)) / n
#
# @classmethod
# def __topo_sin(cls, x: float) -> float:
# """
# Topologist sine function.
#
# Topologist sine function with parameter x has following computation rules.
# 1. If x is nan or -inf, the result is nan.
# 2. If x is inf, the result is 0.
# 3. If x is finite nonpositive, the result is nan.
# 4. If x is finite positive, the result is ``math.sin(1 / x)``.
#
# This method is private and called internally as a helper of ``SpeiclFun.simplify``.
# For detailed description for simplification, refer to the comments of ``SpeiclFun.simplify``.
#
# :param x: Point where topologist sine function is to be computed.
# :type x: float
#
# :return: Computed value of topologist sine function.
# :rtype: float
# """
# return math.nan if x <= 0 else math.sin(1 / x)
#
# @classmethod
# def chk_t(cls, rt: Token.Fun) -> Optional[List[Type.Sign]]:
# """
# Type checker for special functions.
# It checks type of input function token and assigns return type as type information of the token.
#
# :param rt: Token to be type checked.
# :type rt: Token.Fun
#
# :return: None if type check is successful. Candidate signatures if not.
# :rtype: Optional[List[Type.Signature]]
# """
# cand: List[Type.Sign] = cls.__sign.get(rt.v) # Candidate signatures
# infer: Type.Sign = Type.Sign([tok.t for tok in rt.chd], Type.T.REAL, rt.v) # Inferred signature
#
# # Inferred signature must be one of candidates and return type is NUM type.
# if infer in cand:
# rt.t = Type.T.REAL
#
# return None
# else:
# return cand
#
# @classmethod
# def simplify(cls, rt: Token.Fun) -> Tuple[Token.Tok, List[Warning.InterpWarn]]:
# """
# Simplifier for special functions.
#
# It does following simplifications.
# 1. Constant folding.
# 2. Dead expression stripping.
# 3. Sign propagation.
# For details and detailed explanation of these optimization tricks, refer to the comments of
# ``Operator.simplify`` and references therein.
#
# :param rt: Root of AST to be simplified.
# :type rt: Token.Fun
#
# :return: Root of simplified AST and list of generated warnings.
# :rtype: Tuple[Token.Tok, List[Warning.InterpWarn]]
#
# :raise NAN_DETECT: If nan is detected as a given parameter.
# :raise IFN_DETECT: If inf is detected as a given parameter.
# :raise DOMAIN_OUT: If given parameter is not in domain.
# :raise POLE_DETECT: If mathematical pole is detected.
# :raise BIG_INT: If given parameter exceeds floating point max.
# :raise SMALL_INT: If given parameter exceeds floating point min.
# """
# warn: List[Warning.InterpWarn] = [] # List of generated warnings.
#
# if rt.v == Type.FunT.Erf:
# # Check for warnings.
# # Error function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Erf'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Erf'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Erf'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Erf'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__erf``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__erf(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Erf[-x] = -Erf[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.Erfc:
# # Check for warnings.
# # Complementary error function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Erfc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Erfc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Erfc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Erfc'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__erfc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__erfc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.Gamma:
# # Check for warnings.
# # Gamma function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is finite nonpositive integer. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Gamma'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Gamma'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Gamma'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Gamma'))
# elif is_int(rt.chd[0].v) and rt.chd[0].v <= 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 23))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__gamma``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__gamma(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.LogGamma:
# # Check for warnings.
# # Loggamma function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is finite nonpositive integer. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Lgamma'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Lgamma'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Lgamma'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Lgamma'))
# elif is_int(rt.chd[0].v) and rt.chd[0].v <= 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 24))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__lgamma``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__lgamma(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.RECIGAMMA:
# # Check for warnings.
# # Reciprocal gamma function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Recigamma'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Recigamma'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Recigamma'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Recigamma'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__recigamma``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__recigamma(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.BESSELCLIFFORD:
# # Check for warnings.
# # Bessel-Clifford function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Besselclifford'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Besselclifford'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Besselclifford'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Besselclifford'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__bessel_clifford``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__bessel_clifford(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.Beta:
# # Check for warnings.
# # Beta function with parameter x and y generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 1. y exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 5. y is nan. (NAN_DETECT)
# # 6. y is +-inf. (INF_DETECT)
# # 7. x is finite nonpositive integer and y is finite. (POLE_DETECT)
# # 8. x is finite and y is finite nonpositive integer. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Beta'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Beta'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Beta'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Beta'))
#
# if is_bigint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=2, handle='Beta'))
# rt.chd[1].v = math.inf
# elif is_smallint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=2, handle='Beta'))
# rt.chd[1].v = -math.inf
# elif math.isnan(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=2, handle='Beta'))
# elif math.isinf(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=2, handle='Beta'))
#
# if math.isfinite(rt.chd[0].v + rt.chd[1].v):
# if rt.chd[0].v <= 0 and is_int(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 31))
# elif rt.chd[1].v <= 0 and is_int(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 31))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__beta``.
# if rt.chd[0].tok_t == rt.chd[1].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__beta(rt.chd[0].v, rt.chd[1].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Beta[nan, y] = nan
# # 2. Beta[-inf, y] = nan
# # 3. Beta[n, y] = nan
# # 4. Beta[x, nan] = nan
# # 5. Beta[x, -inf] = nan
# # 6. Beta[x, n] = nan
# # where n is finite nonpositive integer.
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM and not is_bigint(rt.chd[0].v):
# if is_smallint(rt.chd[0].v) or math.isnan(rt.chd[0].v) or rt.chd[0].v == -math.inf or \
# (is_int(rt.chd[0].v) and rt.chd[0].v <= 0):
# rt.chd[0].v = math.nan
#
# return rt.chd[0], warn
# elif rt.chd[1].tok_t == Type.TokT.NUM and not is_bigint(rt.chd[1].v):
# if is_smallint(rt.chd[1].v) or math.isnan(rt.chd[1].v) or rt.chd[1].v == -math.inf or \
# (is_int(rt.chd[1].v) and rt.chd[1].v <= 0):
# rt.chd[1].v = math.nan
#
# return rt.chd[1], warn
#
# return rt, warn
# elif rt.v == Type.FunT.CENTRALBETA:
# # Check for warnings.
# # Central beta function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is finite nonpositive integer. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Centralbeta'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Centralbeta'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Centralbeta'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Centralbeta'))
# elif is_int(rt.chd[0].v) and rt.chd[0].v <= 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 40))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__cenbeta``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__cenbeta(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
# elif rt.v == Type.FunT.Sinc:
# # Check for warnings.
# # Sinc function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Sinc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Sinc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Sinc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Sinc'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__sinc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__sinc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Sinc[-x] = Sinc[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.TANC:
# # Check for warnings.
# # Tanc function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is integer multiple of pi + pi/2. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Tanc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Tanc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Tanc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Tanc'))
# elif (rt.chd[0].v - math.pi / 2) % math.pi == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 41))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__tanc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__tanc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Tanc[-x] = Tanc[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.SINHC:
# # Check for warnings.
# # Sinhc function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Sinhc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Sinhc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Sinhc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Sinhc'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__sinhc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__sinhc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Sinhc[-x] = Sinhc[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.COSHC:
# # Check for warnings.
# # Coshc function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is 0. (POLE_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Coshc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Coshc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Coshc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Coshc'))
# elif rt.chd[0].v == 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.POLE_DETECT, 42))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__coshc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__coshc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Sign propagation.
# # For sign propagation, it uses following rule.
# # 1. Coshc[-x] = -Coshc[x]
# # The following logic is an implementation of this rule.
# if rt.chd[0].v == Type.OpT.MINUS:
# tmp = rt.chd[0]
# rt.swap_chd(rt.chd[0].chd[0], 0)
# tmp.swap_chd(rt, 0)
#
# return tmp, warn
#
# return rt, warn
# elif rt.v == Type.FunT.TANHC:
# # Check for warnings.
# # Tanhc function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Tanhc'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Tanhc'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Tanhc'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Tanhc'))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__tanhc``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__tanhc(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Tanhc[-x] = Tanhc[x]
# # The following logic is an implementation of these rules.
# if rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.DIRICHLETKERNEL:
# # Check for warnings.
# # Dirichlet kernel with parameter x and n generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. n exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 5. n is nan. (NAN_DETECT)
# # 6. n is +-inf. (INF_DETECT)
# # 7. n is not nonnegative integer. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Dirichletkernel'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Dirichletkernel'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Dirichletkernel'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Dirichletkernel'))
#
# if is_bigint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=2, handle='Dirichletkernel'))
# rt.chd[1].v = math.inf
# elif is_smallint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=2, handle='Dirichletkernel'))
# rt.chd[1].v = -math.inf
# elif math.isnan(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=2, handle='Dirichletkernel'))
# elif math.isinf(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=2, handle='Dirichletkernel'))
# elif rt.chd[1].v < 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 44))
# elif not is_int(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 45))
# rt.chd[1].v = round(rt.chd[1].v)
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__dirichlet_ker``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__dirichlet_ker(rt.chd[0].v, rt.chd[1].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Dirichletkernel[nan, n] = nan
# # 2. Dirichletkernel[+-inf, n] = nan
# # 3. Dirichletkernel[x, nan] = nan
# # 4. Dirichletkernel[x, +-inf] = nan
# # 5. Dirichletkernel[x, y] = nan
# # 6. Dirichletkernel[-x, n] = Dirichletkernel[x, n]
# # where y is finite negative.
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v) or is_smallint(rt.chd[0].v) or math.isnan(rt.chd[0].v) or \
# math.isinf(rt.chd[0].v):
# rt.chd[0].v = math.nan
#
# return rt.chd[0], warn
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].v) or is_smallint(rt.chd[1].v) or math.isnan(rt.chd[1].v) or \
# math.isinf(rt.chd[1].v) or rt.chd[1].v < 0:
# rt.chd[1].v = math.nan
#
# return rt.chd[1], warn
# elif rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# elif rt.v == Type.FunT.FEJERKERNEL:
# # Check for warnings.
# # Fejer kernel with parameter x and n generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. n exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 5. n is nan. (NAN_DETECT)
# # 6. n is +-inf. (INF_DETECT)
# # 7. n is not positive integer. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Fejerkernel'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Fejerkernel'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Fejerkernel'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Fejerkernel'))
#
# if is_bigint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=2, handle='Fejerkernel'))
# rt.chd[1].v = math.inf
# elif is_smallint(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=2, handle='Fejerkernel'))
# rt.chd[1].v = -math.inf
# elif math.isnan(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=2, handle='Fejerkernel'))
# elif math.isinf(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=2, handle='Fejerkernel'))
# elif rt.chd[1].v < 0.5:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 46))
# elif not is_int(rt.chd[1].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 47))
# rt.chd[1].v = round(rt.chd[1].v)
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__fejer_ker``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__fejer_ker(rt.chd[0].v, rt.chd[1].v)
#
# return rt.chd[0], warn
#
# # Dead expr stripping.
# # For dead expr stripping, it uses following rules.
# # 1. Fejerkernel[nan, n] = nan
# # 2. Fejerkernel[+-inf, n] = nan
# # 3. Fejerkernel[x, nan] = nan
# # 4. Fejerkernel[x, +-inf] = nan
# # 5. Fejerkernel[x, y] = nan
# # 6. Fejerkernel[-x, n] = Fejerkernel[x, n]
# # where y is finite real less than 0.5.
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v) or is_smallint(rt.chd[0].v) or math.isnan(rt.chd[0].v) or \
# math.isinf(rt.chd[0].v):
# rt.chd[0].v = math.nan
#
# return rt.chd[0], warn
# elif rt.chd[1].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[1].v) or is_smallint(rt.chd[1].v) or math.isnan(rt.chd[1].v) or \
# math.isinf(rt.chd[1].v) or rt.chd[1].v < 0.5:
# rt.chd[1].v = math.nan
#
# return rt.chd[1], warn
# elif rt.chd[0].v == Type.OpT.MINUS:
# rt.swap_chd(rt.chd[0].chd[0], 0)
#
# return rt, warn
#
# return rt, warn
# else:
# # Check for warnings.
# # Topologist's sine function with parameter x generates warning for followings cases.
# # 1. x exceeds floating point max/min size. (BIG_INT/SMALL_INT, resp.)
# # 2. x is nan. (NAN_DETECT)
# # 3. x is +-inf. (INF_DETECT)
# # 4. x is finite nonpositive. (DOMAIN_OUT)
# # The following logic is an implementation of these rules.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# if is_bigint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.BIG_INT, 15, arg_pos=1, handle='Topologistsin'))
# rt.chd[0].v = math.inf
# elif is_smallint(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.SMALL_INT, 16, arg_pos=1, handle='Topologistsin'))
# rt.chd[0].v = -math.inf
# elif math.isnan(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.NAN_DETECT, 1, arg_pos=1, handle='Topologistsin'))
# elif math.isinf(rt.chd[0].v):
# warn.append(Warning.InterpWarn(Type.InterpWarnT.INF_DETECT, 2, arg_pos=1, handle='Topologistsin'))
# elif rt.chd[0].v <= 0:
# warn.append(Warning.InterpWarn(Type.InterpWarnT.DOMAIN_OUT, 43))
#
# # Constant folding.
# # For detailed computation rule, refer to the comment in ``SpecialFun.__topo_sin``.
# if rt.chd[0].tok_t == Type.TokT.NUM:
# rt.chd[0].v = cls.__topo_sin(rt.chd[0].v)
#
# return rt.chd[0], warn
#
# return rt, warn
#
# @classmethod
# def test(cls, fun: Type.FunT, test_in: List[List[Decimal]]) -> List[Decimal]:
# """
# Test function for special function.
#
# It just call corresponding target function and evaluate it at test input points.
#
# :param fun: Function to be tested.
# :type fun: Type.FunT
# :param test_in: Test input.
# :type test_in: List[List[Decimal]]
#
# :return: Test output.
# :rtype: List[Decimal]
# """
# if fun == Type.FunT.Erf:
# return list(map(lambda x: Decimal(cls.__erf(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Erfc:
# return list(map(lambda x: Decimal(cls.__erfc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Gamma:
# return list(map(lambda x: Decimal(cls.__gamma(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.LogGamma:
# return list(map(lambda x: Decimal(cls.__lgamma(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.RECIGAMMA:
# return list(map(lambda x: Decimal(cls.__recigamma(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.BESSELCLIFFORD:
# return list(map(lambda x: Decimal(cls.__bessel_clifford(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Beta:
# return list(map(lambda x: Decimal(cls.__beta(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.CENTRALBETA:
# return list(map(lambda x: Decimal(cls.__cenbeta(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.Sinc:
# return list(map(lambda x: Decimal(cls.__sinc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.TANC:
# return list(map(lambda x: Decimal(cls.__tanc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.SINHC:
# return list(map(lambda x: Decimal(cls.__sinhc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.COSHC:
# return list(map(lambda x: Decimal(cls.__coshc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.TANHC:
# return list(map(lambda x: Decimal(cls.__tanhc(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.DIRICHLETKERNEL:
# return list(map(lambda x: Decimal(cls.__dirichlet_ker(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.FEJERKERNEL:
# return list(map(lambda x: Decimal(cls.__fejer_ker(*list(map(float, x)))), test_in))
# elif fun == Type.FunT.TOPOLOGISTSIN:
# return list(map(lambda x: Decimal(cls.__topo_sin(*list(map(float, x)))), test_in))
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,559 | eik4862/TinyCalculator | refs/heads/master | /Operator/Compare.py | from __future__ import annotations
from typing import final, Final, Tuple, Dict, Optional, List
from Core import Token, TypeSystem
from Operator import Operator
class CompOp(Operator.Op):
    """
    Base class for binary comparison operators.

    Provides the operand count and the default type-checking rule shared by
    the comparison operators. Subclasses supply symbol, precedence, and
    signature strings, and may override ``chk_t``.
    """
    # All comparison operators take exactly two operands.
    __ARGC: Final[int] = 2

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def argc(cls) -> int:
        """Return the number of operands (always 2)."""
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """
        Type-check a comparison node against its two children.

        Computes the common supertype of the operand (element) types via
        ``TypeSystem.T.supt`` (presumably the least common supertype — the
        semantics live in Core.TypeSystem). Comparison is rejected (returns
        None) when no supertype exists or it is Cmplx/Void. Otherwise the
        result type is Bool (or a Bool array mirroring the array operand's
        shape); a Sym supertype propagates unchanged for scalar-involved
        cases so symbolic expressions stay symbolic.

        :param rt: AST node whose children are already typed.
        :param t_env: Type environment, returned unchanged on success.
        :return: ``t_env`` on success, None on a type error.
        """
        lhs_t: TypeSystem.T = rt.chd[0].t
        rhs_t: TypeSystem.T = rt.chd[1].t

        if lhs_t.base and rhs_t.base:
            # Scalar vs. scalar.
            sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t, rhs_t)
            if not sup_t or type(sup_t) in [TypeSystem.Cmplx, TypeSystem.Void]:
                return None
            rt.t = sup_t if type(sup_t) == TypeSystem.Sym else TypeSystem.Bool.inst()
        elif lhs_t.base:
            # Scalar vs. array: compare against the array's element type.
            sup_t = TypeSystem.T.supt(lhs_t, rhs_t.chd_t)
            if not sup_t or type(sup_t) in [TypeSystem.Cmplx, TypeSystem.Void]:
                return None
            rt.t = sup_t if type(sup_t) == TypeSystem.Sym else \
                TypeSystem.ArrFact.inst().coerce_arr_t(rhs_t, TypeSystem.Bool.inst())
        elif rhs_t.base:
            # Array vs. scalar: symmetric to the case above.
            sup_t = TypeSystem.T.supt(lhs_t.chd_t, rhs_t)
            if not sup_t or type(sup_t) in [TypeSystem.Cmplx, TypeSystem.Void]:
                return None
            rt.t = sup_t if type(sup_t) == TypeSystem.Sym else \
                TypeSystem.ArrFact.inst().coerce_arr_t(lhs_t, TypeSystem.Bool.inst())
        else:
            # Array vs. array: inspect the supertype's element type.
            sup_t = TypeSystem.T.supt(lhs_t, rhs_t)
            if not sup_t or type(sup_t.chd_t) in [TypeSystem.Cmplx, TypeSystem.Void]:
                return None
            rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(lhs_t, TypeSystem.Bool.inst())

        return t_env
@final
class Eq(CompOp):
    """
    Equality comparison operator (``==``).

    Unlike the generic comparison rule, equality also accepts Cmplx operands
    and always yields Bool (or a Bool array); only Void is rejected.
    """
    # (precedence entering the stack, precedence on the stack) for parsing.
    __PRECD: Final[Tuple[int, int]] = (8, 7)
    __SYM: Final[str] = '=='
    # Human-readable signatures shown to the user.
    __SGN: Final[List[str]] = ['Real == Real -> Bool',
                               'Cmplx == Cmplx -> Bool',
                               'Str == Str -> Bool',
                               'Bool == Bool -> Bool',
                               'Sym == Sym -> Bool',
                               'Real == List of Real (n fold) -> List of Bool (n fold)',
                               'Cmplx == List of Cmplx (n fold) -> List of Bool (n fold)',
                               'Str == List of Str (n fold) -> List of Bool (n fold)',
                               'Bool == List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Real (n fold) == Real -> List of Bool (n fold)',
                               'List of Cmplx (n fold) == Cmplx -> List of Bool (n fold)',
                               'List of Str (n fold) == Str -> List of Bool (n fold)',
                               'List of Bool (n fold) == Bool -> List of Bool (n fold)',
                               'List of Real (n fold) == List of Real (n fold) -> List of Bool (n fold)',
                               'List of Cmplx (n fold) == List of Cmplx (n fold) -> List of Bool (n fold)',
                               'List of Str (n fold) == List of Str (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) == List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the incoming precedence."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the on-stack precedence."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the signature strings."""
        return cls.__SGN

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """
        Type-check an equality node.

        Rejects (returns None) only when the operands have no common
        supertype or the supertype (element type, for arrays) is Void; the
        result is always Bool, shaped like the array operand when present.

        :param rt: AST node whose children are already typed.
        :param t_env: Type environment, returned unchanged on success.
        :return: ``t_env`` on success, None on a type error.
        """
        lhs_t: TypeSystem.T = rt.chd[0].t
        rhs_t: TypeSystem.T = rt.chd[1].t

        if lhs_t.base and rhs_t.base:
            # Scalar vs. scalar.
            sup_t: TypeSystem.T = TypeSystem.T.supt(lhs_t, rhs_t)
            if not sup_t or type(sup_t) == TypeSystem.Void:
                return None
            rt.t = TypeSystem.Bool.inst()
        elif lhs_t.base:
            # Scalar vs. array: compare against the array's element type.
            sup_t = TypeSystem.T.supt(lhs_t, rhs_t.chd_t)
            if not sup_t or type(sup_t) == TypeSystem.Void:
                return None
            rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(rhs_t, TypeSystem.Bool.inst())
        elif rhs_t.base:
            # Array vs. scalar: symmetric to the case above.
            sup_t = TypeSystem.T.supt(lhs_t.chd_t, rhs_t)
            if not sup_t or type(sup_t) == TypeSystem.Void:
                return None
            rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(lhs_t, TypeSystem.Bool.inst())
        else:
            # Array vs. array: inspect the supertype's element type.
            sup_t = TypeSystem.T.supt(lhs_t, rhs_t)
            if not sup_t or type(sup_t.chd_t) == TypeSystem.Void:
                return None
            rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(lhs_t, TypeSystem.Bool.inst())

        return t_env
@final
class Diff(CompOp):
    """
    Inequality comparison operator (``!=``).

    Mirrors ``Eq``: Cmplx operands are allowed, only Void is rejected, and
    the result is always Bool (or a Bool array).
    """
    # (precedence entering the stack, precedence on the stack) for parsing.
    __PRECD: Final[Tuple[int, int]] = (8, 7)
    __SYM: Final[str] = '!='
    # Human-readable signatures shown to the user.
    # Fixed: these previously showed '==' (copy-paste from Eq) and claimed
    # 'Sym == Sym -> Sym' although chk_t below always yields Bool.
    __SGN: Final[List[str]] = ['Real != Real -> Bool',
                               'Cmplx != Cmplx -> Bool',
                               'Str != Str -> Bool',
                               'Bool != Bool -> Bool',
                               'Sym != Sym -> Bool',
                               'Real != List of Real (n fold) -> List of Bool (n fold)',
                               'Cmplx != List of Cmplx (n fold) -> List of Bool (n fold)',
                               'Str != List of Str (n fold) -> List of Bool (n fold)',
                               'Bool != List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Real (n fold) != Real -> List of Bool (n fold)',
                               'List of Cmplx (n fold) != Cmplx -> List of Bool (n fold)',
                               'List of Str (n fold) != Str -> List of Bool (n fold)',
                               'List of Bool (n fold) != Bool -> List of Bool (n fold)',
                               'List of Real (n fold) != List of Real (n fold) -> List of Bool (n fold)',
                               'List of Cmplx (n fold) != List of Cmplx (n fold) -> List of Bool (n fold)',
                               'List of Str (n fold) != List of Str (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) != List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the incoming precedence."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the on-stack precedence."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the signature strings."""
        return cls.__SGN

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """
        Type-check an inequality node.

        Rejects (returns None) only when the operands have no common
        supertype or the supertype (element type, for arrays) is Void; the
        result is always Bool, shaped like the array operand when present.

        :param rt: AST node whose children are already typed.
        :param t_env: Type environment, returned unchanged on success.
        :return: ``t_env`` on success, None on a type error.
        """
        t1: TypeSystem.T = rt.chd[0].t
        t2: TypeSystem.T = rt.chd[1].t
        if t1.base:
            if t2.base:
                res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
                if not res_t or type(res_t) == TypeSystem.Void:
                    return None
                else:
                    rt.t = TypeSystem.Bool.inst()
            else:
                res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2.chd_t)
                if not res_t or type(res_t) == TypeSystem.Void:
                    return None
                else:
                    rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t2, TypeSystem.Bool.inst())
        else:
            if t2.base:
                res_t: TypeSystem.T = TypeSystem.T.supt(t1.chd_t, t2)
                if not res_t or type(res_t) == TypeSystem.Void:
                    return None
                else:
                    rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, TypeSystem.Bool.inst())
            else:
                res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
                # Fixed: previously tested `not res_t.chd_t`, which raises
                # AttributeError when supt returns None (Eq checks `not res_t`).
                if not res_t or type(res_t.chd_t) == TypeSystem.Void:
                    return None
                else:
                    rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, TypeSystem.Bool.inst())
        return t_env
@final
class Abv(CompOp):
    """
    Strict ordering comparison operator (``<``).

    Type checking is inherited from ``CompOp.chk_t``: Cmplx operands are
    rejected, Sym propagates, and every other case yields Bool (or a Bool
    array shaped like the array operand).
    """
    # NOTE(review): the class is named Abv but carries symbol '<' —
    # presumably an intentional repo-wide naming scheme; confirm in Parser.
    # (precedence entering the stack, precedence on the stack) for parsing.
    __PRECD: Final[Tuple[int, int]] = (10, 9)
    __SYM: Final[str] = '<'
    # Human-readable signatures shown to the user.
    # Fixed: the 'List of Str < List of Str' line previously claimed a
    # 'List of Str' result, but CompOp.chk_t always coerces to Bool arrays.
    __SGN: Final[List[str]] = ['Real < Real -> Bool',
                               'Str < Str -> Bool',
                               'Bool < Bool -> Bool',
                               'Sym < Sym -> Sym',
                               'Real < List of Real (n fold) -> List of Bool (n fold)',
                               'Str < List of Str (n fold) -> List of Bool (n fold)',
                               'Bool < List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Real (n fold) < Real -> List of Bool (n fold)',
                               'List of Str (n fold) < Str -> List of Bool (n fold)',
                               'List of Bool (n fold) < Bool -> List of Bool (n fold)',
                               'List of Real (n fold) < List of Real (n fold) -> List of Bool (n fold)',
                               'List of Str (n fold) < List of Str (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) < List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the incoming precedence."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the on-stack precedence."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the signature strings."""
        return cls.__SGN
@final
class Blw(CompOp):
    """
    Strict ordering comparison operator (``>``).

    Type checking is inherited from ``CompOp.chk_t``: Cmplx operands are
    rejected, Sym propagates, and every other case yields Bool (or a Bool
    array shaped like the array operand).
    """
    # NOTE(review): the class is named Blw but carries symbol '>' —
    # presumably an intentional repo-wide naming scheme; confirm in Parser.
    # (precedence entering the stack, precedence on the stack) for parsing.
    __PRECD: Final[Tuple[int, int]] = (10, 9)
    __SYM: Final[str] = '>'
    # Human-readable signatures shown to the user.
    # Fixed: the 'List of Str > List of Str' line previously claimed a
    # 'List of Str' result, but CompOp.chk_t always coerces to Bool arrays.
    __SGN: Final[List[str]] = ['Real > Real -> Bool',
                               'Str > Str -> Bool',
                               'Bool > Bool -> Bool',
                               'Sym > Sym -> Sym',
                               'Real > List of Real (n fold) -> List of Bool (n fold)',
                               'Str > List of Str (n fold) -> List of Bool (n fold)',
                               'Bool > List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Real (n fold) > Real -> List of Bool (n fold)',
                               'List of Str (n fold) > Str -> List of Bool (n fold)',
                               'List of Bool (n fold) > Bool -> List of Bool (n fold)',
                               'List of Real (n fold) > List of Real (n fold) -> List of Bool (n fold)',
                               'List of Str (n fold) > List of Str (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) > List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the incoming precedence."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the on-stack precedence."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the signature strings."""
        return cls.__SGN
@final
class Geq(CompOp):
    """
    Non-strict ordering comparison operator (``<=``).

    Type checking is inherited from ``CompOp.chk_t``: Cmplx operands are
    rejected, Sym propagates, and every other case yields Bool (or a Bool
    array shaped like the array operand).
    """
    # NOTE(review): the class is named Geq yet carries symbol '<=' (and Leq
    # carries '>='); presumably an intentional repo-wide naming scheme —
    # confirm against the Parser's token mapping before renaming anything.
    # (precedence entering the stack, precedence on the stack) for parsing.
    __PRECD: Final[Tuple[int, int]] = (10, 9)
    __SYM: Final[str] = '<='
    # Human-readable signatures shown to the user.
    # Fixed: the 'List of Str <= List of Str' line previously claimed a
    # 'List of Str' result, but CompOp.chk_t always coerces to Bool arrays.
    __SGN: Final[List[str]] = ['Real <= Real -> Bool',
                               'Str <= Str -> Bool',
                               'Bool <= Bool -> Bool',
                               'Sym <= Sym -> Sym',
                               'Real <= List of Real (n fold) -> List of Bool (n fold)',
                               'Str <= List of Str (n fold) -> List of Bool (n fold)',
                               'Bool <= List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Real (n fold) <= Real -> List of Bool (n fold)',
                               'List of Str (n fold) <= Str -> List of Bool (n fold)',
                               'List of Bool (n fold) <= Bool -> List of Bool (n fold)',
                               'List of Real (n fold) <= List of Real (n fold) -> List of Bool (n fold)',
                               'List of Str (n fold) <= List of Str (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) <= List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the incoming precedence."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the on-stack precedence."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the signature strings."""
        return cls.__SGN
@final
class Leq(CompOp):
    """
    Non-strict ordering comparison operator (``>=``).

    Type checking is inherited from ``CompOp.chk_t``: Cmplx operands are
    rejected, Sym propagates, and every other case yields Bool (or a Bool
    array shaped like the array operand).
    """
    # NOTE(review): the class is named Leq yet carries symbol '>=' (and Geq
    # carries '<='); presumably an intentional repo-wide naming scheme —
    # confirm against the Parser's token mapping before renaming anything.
    # (precedence entering the stack, precedence on the stack) for parsing.
    __PRECD: Final[Tuple[int, int]] = (10, 9)
    __SYM: Final[str] = '>='
    # Human-readable signatures shown to the user.
    # Fixed: the 'List of Str >= List of Str' line previously claimed a
    # 'List of Str' result, but CompOp.chk_t always coerces to Bool arrays.
    __SGN: Final[List[str]] = ['Real >= Real -> Bool',
                               'Str >= Str -> Bool',
                               'Bool >= Bool -> Bool',
                               'Sym >= Sym -> Sym',
                               'Real >= List of Real (n fold) -> List of Bool (n fold)',
                               'Str >= List of Str (n fold) -> List of Bool (n fold)',
                               'Bool >= List of Bool (n fold) -> List of Bool (n fold)',
                               'List of Real (n fold) >= Real -> List of Bool (n fold)',
                               'List of Str (n fold) >= Str -> List of Bool (n fold)',
                               'List of Bool (n fold) >= Bool -> List of Bool (n fold)',
                               'List of Real (n fold) >= List of Real (n fold) -> List of Bool (n fold)',
                               'List of Str (n fold) >= List of Str (n fold) -> List of Bool (n fold)',
                               'List of Bool (n fold) >= List of Bool (n fold) -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are namespaces only; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Return the incoming precedence."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Return the on-stack precedence."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Return the operator symbol."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Return the signature strings."""
        return cls.__SGN
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,560 | eik4862/TinyCalculator | refs/heads/master | /Core/ErrorManager.py | from Core import Type, DB, SystemManager
from Error import *
from Util import Printer
class ErrManager:
"""
Handle error by generating proper error messages in consistent form.
This class is implemented as singleton.
For the concept of singleton pattern, consult the references below.
**Reference**
* https://en.wikipedia.org/wiki/Singleton_pattern
:cvar __inst: Singleton object.
"""
__inst = None
    def __init__(self) -> None:
        """Construct the manager. No per-instance state is initialized."""
        pass
    def __del__(self) -> None:
        """Finalizer. No resources to release."""
        pass
def __handle_sys_err(self, err: Error.SysErr) -> None:
"""
Handler for error from system manager module.
It handles following errors according to following forms.
* REG_FAIL
[System] ERROR: Cannot register signal handler to {signal name}.
Error message from OS: {error message from OS}
* UNREG_FAIL
[System] ERROR: Cannot unregister signal handler to {signal name}.
Error message from OS: {error message from OS}
* TIMEOUT
[System] ERROR: Timeout.
{error message} (It contains the # of iterations until timeout.)
For detailed information of each error, refer to the comments of ``SysErrT``.
This method is private and called internally as a helper of ``ErrManager.handle_err``.
For detailed description for error handling, refer to the comments of ``ErrManager.handle_err``.
:param err: Interpreter error to be handled.
:type err: Error.InterpErr
"""
buf: Type.BufT = Type.BufT.STDERR # Target buffer.
mark: str = Printer.Printer.inst().f_col('ERROR', Type.Col.RED) # Error mark.
if err.err_no == 25:
Printer.Printer.inst().buf_newline(buf)
lim: int = SystemManager.SysManager.inst().get_sys_var('Input_Timeout').v # Timeout limit.
msg: str = DB.DB.inst().get_err_msg(24).replace('$1', str(lim)) # Error message.
if err.err_t == Type.SysErrT.REG_FAIL:
Printer.Printer.inst().buf(f'[System] {mark}: Cannot register signal handler to {err.sig}.', buf)
Printer.Printer.inst().buf(f'Error message from OS: {err.err_str}.', buf)
Printer.Printer.inst().buf('DB error is critical and cannot be recovered. Terminate.', buf)
Printer.Printer.inst().buf_newline(buf)
return
elif err.err_t == Type.SysErrT.UNREG_FAIL:
Printer.Printer.inst().buf(f'[System] {mark}: Cannot unregister signal handler to {err.sig}.', buf)
Printer.Printer.inst().buf(f'Error message from OS: {err.err_str}.', buf)
Printer.Printer.inst().buf('DB error is critical and cannot be recovered. Terminate.', buf)
Printer.Printer.inst().buf_newline(buf)
return
else:
Printer.Printer.inst().buf(f'[System] {mark}: Timeout.', buf)
if err.err_no == 24:
lim: int = SystemManager.SysManager.inst().get_sys_var('Computation_Timeout').v # Timeout limit.
msg: str = DB.DB.inst().get_err_msg(23).replace('$1', str(lim)) # Error message.
msg = msg.replace('$2', str(err.iter))
elif err.err_no != 25:
msg: str = DB.DB.inst().get_err_msg(err.err_no - 1) # Error message.
Printer.Printer.inst().buf(msg, buf)
Printer.Printer.inst().buf_newline(buf)
def __handle_DB_err(self, err: Error.DBErr) -> None:
"""
Handler for error from DB module.
This method is private and called internally as a helper of ``ErrManager.handle_err``.
For detailed description for error handling, refer to the comments of ``ErrManager.handle_err``.
:param err: DB error to be handled.
:type err: Error.DBErr
"""
# Since DB error may occur before error messages are fully loaded, its error messages are hardcoded here.
buf: Type.BufT = Type.BufT.STDERR # Target buffer.
mark: str = Printer.Printer.inst().f_col('ERROR', Type.Col.RED) # Error mark.
if err.err_t == Type.DBErrT.OPEN_ERR:
Printer.Printer.inst().buf(f'[DB] {mark}: Cannot open DB source file at {err.path}.', buf)
Printer.Printer.inst().buf(f'Error message from OS: {err.err_str}.', buf)
else:
Printer.Printer.inst().buf(f'[DB] {mark}: Cannot close DB source file at {err.path}.', buf)
Printer.Printer.inst().buf(f'Error message from OS: {err.err_str}.', buf)
Printer.Printer.inst().buf('DB error is critical and cannot be recovered. Terminate.', buf)
Printer.Printer.inst().buf_newline(buf)
def __handle_parser_err(self, err: ParserError) -> None:
"""
Handler for error from parser module.
This method is private and called internally as a helper of ``ErrManager.handle_err``.
For detailed description for error handling, refer to the comments of ``ErrManager.handle_err``.
:param err: Parser error to be handled.
:type err: Error.ParserErr
"""
buf: Type.BufT = Type.BufT.STDERR # Target buffer.
mark: str = Printer.Printer.inst().f_col('ERROR', Type.Col.RED) # Error mark.
err_t: type = type(err) # Error type.
if err_t == ParserError.EmptyExpr:
Printer.Printer.inst().buf(f'[Parser] {mark}: Empty expression.', buf)
elif err_t == ParserError.InvalidTok:
Printer.Printer.inst().buf(err.line, buf)
Printer.Printer.inst().buf('~' * err.pos + '^', buf)
Printer.Printer.inst().buf(f'[Parser] {mark}: Invalid token.', buf)
else:
Printer.Printer.inst().buf(err.line, buf)
Printer.Printer.inst().buf('~' * err.pos + '^', buf)
Printer.Printer.inst().buf(f'[Parser] {mark}: Invalid expression.', buf)
if err.errno == 2:
msg: str = DB.DB.inst().get_err_msg(1).replace('$1', err.line[err.pos]) # Error message.
elif err.errno == 11:
msg: str = DB.DB.inst().get_err_msg(10) # Error message.
msg = msg.replace('$1', err.err_op[0].sym()).replace('$2', err.err_op[1].sym())
else:
msg: str = DB.DB.inst().get_err_msg(err.errno - 1) # Error message.
Printer.Printer.inst().buf(msg, buf)
Printer.Printer.inst().buf_newline(buf)
def __handle_interp_err(self, err: InterpreterError) -> None:
"""
Handler for error from interpreter module.
This method is private and called internally as a helper of ``ErrManager.handle_err``.
For detailed description for error handling, refer to the comments of ``ErrManager.handle_err``.
:param err: Interpreter error to be handled.
:type err: Error.InterpErr
"""
buf: Type.BufT = Type.BufT.STDERR # Target buffer.
mark: str = Printer.Printer.inst().f_col('ERROR', Type.Col.RED) # Error mark.
if type(err) == InterpreterError.TErr:
Printer.Printer.inst().buf(err.line, buf)
Printer.Printer.inst().buf('~' * err.pos + '^', buf)
Printer.Printer.inst().buf(f'[Interpreter] {mark}: Type error.', buf)
else:
Printer.Printer.inst().buf(err.line, buf)
Printer.Printer.inst().buf('~' * err.pos + '^', buf)
Printer.Printer.inst().buf(f'[Interpreter] {mark}: Signature is not found.', buf)
if err.errno == 22:
msg: str = DB.DB.inst().get_err_msg(21).replace('$1', str(err.wrong_t)) # Error message.
msg = msg.replace('$2', str(err.right_t))
else:
msg: str = DB.DB.inst().get_err_msg(22).replace('$1', f'\n {err.err_sgn}\n') # Error message.
tmp: str = '\n' # Temporary buffer for candidate signatures.
for i in range(len(err.cand_sgn)):
tmp += f' [{i}] {err.cand_sgn[i]}\n'
msg = msg.replace('$2', tmp).replace('$3', err.handle)
Printer.Printer.inst().buf(msg, buf)
Printer.Printer.inst().buf_newline(buf)
def __handle_util_err(self, err: Error.UtilErr) -> None:
buf: Type.BufT = Type.BufT.STDERR # Target buffer.
mark: str = Printer.Printer.inst().f_col('ERROR', Type.Col.RED) # Error mark.
if err.t == Type.UtilErrT.NOT_FOUND:
Printer.Printer.inst().buf(f'[Cmd.Utility] {mark}: Not found.', buf)
elif err.t == Type.UtilErrT.DOMAIN_OUT:
Printer.Printer.inst().buf(f'[Cmd.Utility] {mark}: Not in domain.', buf)
elif err.t == Type.UtilErrT.RD_ONLY:
Printer.Printer.inst().buf(f'[Cmd.Utility] {mark}: Read only.', buf)
elif err.t == Type.UtilErrT.T_MISMATCH:
Printer.Printer.inst().buf(f'[Cmd.Utility] {mark}: Type error.', buf)
elif err.t == Type.UtilErrT.INF_DETECT:
Printer.Printer.inst().buf(f'[Cmd.Utility] {mark}: Inf detected.', buf)
else:
Printer.Printer.inst().buf(f'[Cmd.Utility] {mark}: Nan detected.', buf)
if err.err_no in [26, 28]:
msg: str = DB.DB.inst().get_err_msg(err.err_no - 1).replace('$1', err.id)
elif err.err_no == 30:
msg: str = DB.DB.inst().get_err_msg(29).replace('$1', err.wrong_t).replace('$2', err.id)
msg = msg.replace('$3', err.correct_t)
else:
msg: str = DB.DB.inst().get_err_msg(err.err_no - 1)
Printer.Printer.inst().buf(msg, buf)
Printer.Printer.inst().buf_newline(buf)
@classmethod
def inst(cls):
"""
Getter for singleton object.
If it is the first time calling this, it initializes the singleton objects.
This automatically supports so called lazy initialization.
:return: Singleton object.
:rtype: ErrManager
"""
if not cls.__inst:
cls.__inst = ErrManager()
return cls.__inst
def handle_err(self, err: Error.Err) -> None:
"""
Handle error by generating proper error messages.
Error messages have following general form with slight difference depending on specific error type and error
code.
[{Error source}] ERROR: {Brief description}
{Detailed description of error}
:param err: Error to be handled.
:type err: Error.Err
"""
err_t: type = type(err).__base__
if err_t == Error.ParserErr:
self.__handle_parser_err(err)
elif isinstance(err, Error.DBErr):
self.__handle_DB_err(err)
elif isinstance(err, Error.SysErr):
self.__handle_sys_err(err)
elif isinstance(err, Error.InterpErr):
self.__handle_interp_err(err)
else:
self.__handle_util_err(err)
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,561 | eik4862/TinyCalculator | refs/heads/master | /Operator/Unary.py | from __future__ import annotations
from typing import final, Final, Tuple, Dict, Optional, List
from Core import TypeSystem, Token
from Operator import Operator
class UniOp(Operator.Op):
    """Common base class for unary operators: fixed arity of one."""
    __ARGC: Final[int] = 1

    def __new__(cls, *args, **kwargs) -> None:
        # Static class; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def argc(cls) -> int:
        """Number of operands a unary operator consumes."""
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Propagate the operand's type to ``rt`` when admissible.

        Base types must be Real, Cmplx, or Sym; container types must hold
        Real or Cmplx elements.  Returns the (unchanged) type environment on
        success, ``None`` on a type error.
        """
        opnd_t: TypeSystem.T = rt.chd[0].t
        if opnd_t.base:
            admissible = type(opnd_t) in (TypeSystem.Real, TypeSystem.Cmplx, TypeSystem.Sym)
        else:
            admissible = type(opnd_t.chd_t) in (TypeSystem.Real, TypeSystem.Cmplx)
        if not admissible:
            return None
        rt.t = opnd_t
        return t_env
@final
class Plus(UniOp):
    """Unary plus operator (+x): identity on numeric, symbolic, and tensor operands."""
    __PRECD: Final[Tuple[int, int]] = (19, 20)  # (in-stack, incoming) precedence.
    __SYM: Final[str] = '+'
    # Human-readable signatures reported on type errors.
    # BUGFIX: a missing comma silently concatenated the last two entries into one string.
    __SGN: Final[List[str]] = ['+Real -> Real',
                               '+Cmplx -> Cmplx',
                               '+Sym -> Sym',
                               '+List of Real (n fold) -> List of Real (n fold)',
                               '+List of Cmplx (n fold) -> List of Cmplx (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Static class; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Precedence while sitting on the operator stack."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Precedence of an incoming occurrence of this operator."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Printable symbol of the operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Candidate signatures used in error messages."""
        return cls.__SGN
@final
class Minus(UniOp):
    """Unary minus operator (-x): negation of numeric, symbolic, and tensor operands."""
    __PRECD: Final[Tuple[int, int]] = (19, 20)  # (in-stack, incoming) precedence.
    __SYM: Final[str] = '-'
    # Human-readable signatures reported on type errors.
    # BUGFIX: a missing comma silently concatenated the last two entries into one string.
    __SGN: Final[List[str]] = ['-Real -> Real',
                               '-Cmplx -> Cmplx',
                               '-Sym -> Sym',
                               '-List of Real (n fold) -> List of Real (n fold)',
                               '-List of Cmplx (n fold) -> List of Cmplx (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Static class; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """Precedence while sitting on the operator stack."""
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        """Precedence of an incoming occurrence of this operator."""
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        """Printable symbol of the operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Candidate signatures used in error messages."""
        return cls.__SGN
@final
class Trans(UniOp):
    """Postfix transpose operator (x')."""
    __PRECD: Final[Tuple[int, int]] = (24, 23)  # (in-stack, incoming) precedence.
    __SYM: Final[str] = '\''
    # Human-readable signatures reported on type errors.
    __SGN: Final[List[str]] = ['Sym\' -> Sym',
                               'List of Real (n fold)\' -> List of Real (n fold) given that n >= 2',
                               'List of Cmplx (n fold)\' -> List of Cmplx (n fold) given that n >= 2',
                               'List of Str (n fold)\' -> List of Str (n fold)',
                               'List of Bool (n fold)\' -> List of Bool (n fold)']

    def __new__(cls, *args, **kwargs) -> None:
        # Static class; instantiation is forbidden.
        raise NotImplementedError

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check transpose: accepts Sym, or a container of fold >= 2
        whose element type is not Void; returns the unchanged type
        environment on success and ``None`` on a type error."""
        t: TypeSystem.T = rt.chd[0].t
        if t.base:
            if type(t) == TypeSystem.Sym:
                rt.t = t
                return t_env
            else:
                return None
        else:
            # Reject Void-typed containers and 1-fold (vector) containers.
            if type(t.chd_t) == TypeSystem.Void or t.fold == 1:
                return None
            else:
                rt.t = t
                return t_env

    @classmethod
    def precd_in(cls) -> int:
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        return cls.__SGN
| {"/Function/Exponential.py": ["/Function/__init__.py"], "/Core/WarningManager.py": ["/Warning/__init__.py"], "/Function/Error.py": ["/Function/__init__.py"], "/Core/DB.py": ["/Error/__init__.py", "/Util/Macro.py"], "/Function/Signal.py": ["/Function/__init__.py"], "/Function/Combination.py": ["/Function/__init__.py"], "/Function/Division.py": ["/Function/__init__.py"], "/Core/Interpreter.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Operator/Delimiter.py": ["/Operator/__init__.py"], "/Operator/Bool.py": ["/Operator/__init__.py"], "/Error/InterpreterError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/Trigonometric.py": ["/Function/__init__.py"], "/Function/Link.py": ["/Function/__init__.py"], "/Function/Hyperbolic.py": ["/Function/__init__.py"], "/Operator/Assign.py": ["/Operator/__init__.py"], "/Core/SystemManager.py": ["/Error/__init__.py"], "/Core/Main.py": ["/Error/__init__.py"], "/Core/AST.py": ["/Operator/__init__.py"], "/Core/Parser.py": ["/Error/__init__.py", "/Warning/__init__.py", "/Operator/__init__.py", "/Function/__init__.py", "/Util/Macro.py"], "/Core/Token.py": ["/Function/__init__.py", "/Operator/__init__.py"], "/Error/ParserError.py": ["/Error/__init__.py", "/Operator/__init__.py"], "/Function/General.py": ["/Function/__init__.py"], "/Test/TestManager.py": ["/Function/__init__.py"], "/Warning/ParserWarning.py": ["/Warning/__init__.py"], "/Core/TypeChecker.py": ["/Operator/__init__.py"], "/Function/Gamma.py": ["/Function/__init__.py"], "/Operator/Binary.py": ["/Operator/__init__.py"], "/Function/Integer.py": ["/Function/__init__.py"], "/Operator/Compare.py": ["/Operator/__init__.py"], "/Core/ErrorManager.py": ["/Error/__init__.py"], "/Operator/Unary.py": ["/Operator/__init__.py"]} |
48,564 | guoguo90/SQN | refs/heads/master | /util_func.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
import random
from numpy import linalg as LA
import math
# ==========================================================================
def CG_Steinhaug_matFree(epsTR, g , deltak, S,Y,nv):
    """
    Solve the trust-region subproblem with matrix-free CG-Steihaug.

    The algorithm is "CG_Steinhaug" from Nocedal & Wright (2006),
    Numerical Optimization; the Hessian approximation Bk is never formed:
    products Bk*d are computed from the compact representation
    Bk = Y M^{-1} Y^T with M = D + L + L^T (Byrd, Nocedal & Schnabel,
    Math. Programming 63, 1994).

    FIX: ``xrange`` (Python-2-only) replaced with ``range`` so the function
    runs on both Python 2 and 3; behavior is otherwise unchanged.

    :param epsTR: Residual tolerance for CG termination.
    :param g: Gradient, shape (nv, 1).
    :param deltak: Trust-region radius.
    :param S: Sampled directions, shape (nv, m).
    :param Y: Corresponding Hessian-vector products, shape (nv, m).
    :param nv: Number of variables (problem dimension).
    :return: Approximate subproblem minimizer pk, shape (nv, 1).
    """
    zOld = np.zeros((nv,1))
    rOld = g                    # CG residual
    dOld = -g                   # CG search direction
    trsLoop = 1e-12             # negative-curvature threshold
    if LA.norm(rOld) < epsTR:
        return zOld
    flag = True
    pk = np.zeros((nv,1))
    # Compact-form data: M = D + L + L^T built from accepted (S, Y) pairs.
    L = np.zeros((Y.shape[1],Y.shape[1]))
    for ii in range(Y.shape[1]):
        for jj in range(0,ii):
            L[ii,jj] = S[:,ii].dot(Y[:,jj])
    tmp = np.sum((S * Y),axis=0)
    D = np.diag(tmp)
    M = (D + L + L.T)
    Minv = np.linalg.inv(M)
    while flag:
        # Matrix-free Hessian-vector product: Bk*d = Y * Minv * (Y^T d).
        tmp1 = np.matmul(Y.T,dOld)
        tmp2 = np.matmul(Minv,tmp1)
        Bk_d = np.matmul(Y,tmp2)
        if dOld.T.dot(Bk_d) < trsLoop:
            # Negative curvature: step to the trust-region boundary.
            tau = rootFinder(LA.norm(dOld)**2, 2*zOld.T.dot(dOld), (LA.norm(zOld)**2 - deltak**2))
            pk = zOld + tau*dOld
            flag = False
            break
        alphaj = rOld.T.dot(rOld) / (dOld.T.dot(Bk_d))
        zNew = zOld + alphaj*dOld
        if LA.norm(zNew) >= deltak:
            # Iterate left the trust region: clip to the boundary.
            tau = rootFinder(LA.norm(dOld)**2, 2*zOld.T.dot(dOld), (LA.norm(zOld)**2 - deltak**2))
            pk = zOld + tau*dOld
            flag = False
            break
        rNew = rOld + alphaj*Bk_d
        if LA.norm(rNew) < epsTR:
            # Converged inside the trust region.
            pk = zNew
            flag = False
            break
        betajplus1 = rNew.T.dot(rNew) / (rOld.T.dot(rOld))
        dNew = -rNew + betajplus1*dOld
        zOld = zNew
        dOld = dNew
        rOld = rNew
    return pk
# ==========================================================================
def rootFinder(a,b,c):
    """Return the largest non-negative root of a*x^2 + b*x + c = 0.

    Prints a diagnostic and (implicitly) returns None when no non-negative
    real root exists — callers in this module only hit the returning paths.

    FIX: the two Python-2 ``print`` statements are now function calls,
    consistent with the ``print("No roots")`` already used below and valid
    on Python 3; the unused ``num_roots`` locals were removed.
    """
    disc = b**2 - 4*a*c  # discriminant
    if disc > 0:
        x1 = ((-b) + np.sqrt(disc))/(2*a+0.0)
        x2 = ((-b) - np.sqrt(disc))/(2*a+0.0)
        x = max(x1,x2)
        if x >= 0:
            return x
        print("no positive root!")
    elif disc == 0:
        x = (-b) / (2*a+0.0)
        if x >= 0:
            return x
        print("no positive root!")
    else:
        print("No roots")
def L_BFGS_two_loop_recursion(g_k,S,Y,k,mmr,gamma_k,nv):
    """
    Return H_k * g_k via the LBFGS two-loop recursion of
    Nocedal & Wright (2006), Numerical Optimization, using the curvature
    pairs stored column-wise in S and Y and the initial scaling gamma_k.

    FIX: ``xrange`` (Python-2-only) replaced with ``range``; behavior is
    otherwise unchanged.

    :param g_k: Gradient, shape (nv, 1).
    :param S, Y: Curvature pairs, shape (nv, m) each.
    :param k: Iteration counter (unused; memory is sized from S directly).
    :param mmr: Maximum memory length.
    :param gamma_k: Initial Hessian scaling H0 = gamma_k * I.
    :param nv: Problem dimension.
    :return: Search-direction vector H_k * g_k, shape (nv, 1).
    """
    # idx = min(k,mmr)
    idx = min(S.shape[1],mmr)
    rho = np.zeros((idx,1))
    theta = np.zeros((idx,1))
    q = g_k
    # First loop: walk pairs newest-to-oldest, peeling off components.
    for i in range(idx):
        rho[idx-i-1] = 1/ S[:,idx-i-1].reshape(nv,1).T.dot(Y[:,idx-i-1].reshape(nv,1))
        theta[idx-i-1] = (rho[idx-i-1])*(S[:,idx-i-1].reshape(nv,1).T.dot(q))
        q = q - theta[idx-i-1]*Y[:,idx-i-1].reshape(nv,1)
    r = gamma_k*q  # apply the initial Hessian approximation H0
    # Second loop: oldest-to-newest, adding the corrections back in.
    for j in range(idx):
        beta = (rho[j])*(Y[:,j].reshape(nv,1).T.dot(r))
        r = r + S[:,j].reshape(nv,1)*(theta[j] - beta)
    return r
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,565 | guoguo90/SQN | refs/heads/master | /S_LSR1.py | #!/usr/bin/enum_weights python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
import pickle
import os.path
import os
import sys
import tensorflow as tf
import time
from util_func import *
from network import *
from data_generation import *
from sampleSY import *
# ==========================================================================
def S_LSR1(w_init,X,y,seed,numIter,mmr,radius,eps,eta,delta_init,epsTR,num_weights,dnn,sess):
    """Sampled LSR1 (S-LSR1) trust-region method.

    Runs the trust-region quasi-Newton loop on the network ``dnn`` with
    freshly sampled curvature pairs each iteration (Berahas, Jahani & Takac,
    2019).  FIXES: Python-2-only ``print`` statements and ``xrange`` replaced
    with Py2/Py3-compatible forms; the pickle file handle is now closed via a
    context manager (it previously leaked).

    :param w_init: Initial weight vector, shape (num_weights, 1).
    :param X: Training inputs fed to the TF graph.
    :param y: One-hot labels fed to the TF graph.
    :param seed: RNG seed for sampling curvature pairs.
    :param numIter: Maximum number of iterations.
    :param mmr: Memory size (number of sampled pairs).
    :param radius: Sampling radius for the (S, Y) pairs.
    :param eps: Acceptance tolerance for SR1 pair updates.
    :param eta: Acceptance threshold for ared/pred in the TR test.
    :param delta_init: Initial trust-region radius.
    :param epsTR: CG-Steihaug tolerance.
    :param num_weights: Problem dimension.
    :param dnn: Network wrapper exposing the TF ops used below.
    :param sess: TensorFlow session.
    """
    w = w_init
    sess.run(dnn.params.assign(w))  # Assign initial weights to parameters of the network
    np.random.seed(seed)            # Set random seed
    numFunEval = 0                  # Counters: function values, gradients and Hessians
    numGradEval = 0
    numHessEval = 0
    deltak = delta_init             # Trust region radius
    HISTORY = []                    # Per-iteration log
    weights_SLSR1 = []              # Stored weights
    k = 0                           # Iteration counter
    st = time.time()                # Start the timer
    objFunOld = sess.run(dnn.cross_entropy, feed_dict={dnn.x: X, dnn.y: y})  # Function value at current iterate
    numFunEval += 1
    print(objFunOld)
    # Method while loop (terminate after numIter or Accuracy 1 achieved)
    while 1:
        gradTemp, acc, xOld = sess.run([dnn.G, dnn.accuracy, dnn.params],
                                       feed_dict={dnn.x: X, dnn.y: y})  # Gradient and accuracy
        gard_k = gradTemp[0]
        numGradEval += 1
        norm_g = LA.norm(gard_k)
        # Sample S, Y pairs
        S, Y, counterSucc, numHessEval = sample_pairs_SY_SLSR1(X, y, num_weights, mmr, radius, eps, dnn,
                                                               numHessEval, sess)
        HISTORY.append([k, objFunOld, acc, norm_g, numFunEval, numGradEval, numHessEval,
                        numFunEval + numGradEval + numHessEval,
                        counterSucc, time.time() - st, deltak])
        print(HISTORY[k])
        if k > numIter or acc == 1:  # Terminate on iteration budget or perfect accuracy
            break
        weights_SLSR1.append(sess.run(dnn.params))
        sk_TR = CG_Steinhaug_matFree(epsTR, gard_k, deltak, S, Y, num_weights)  # TR step via CG-Steihaug
        sess.run(dnn.ASSIGN_OP, feed_dict={dnn.updateVal: xOld + sk_TR})        # Try the new weights
        objFunNew = sess.run(dnn.cross_entropy, feed_dict={dnn.x: X, dnn.y: y})
        numFunEval += 1
        ared = objFunOld - objFunNew  # Actual reduction
        # Predicted reduction from the compact SR1 model: Bk = Y Mp^{-1} Y^T.
        Lp = np.zeros((Y.shape[1], Y.shape[1]))
        for ii in range(Y.shape[1]):
            for jj in range(0, ii):
                Lp[ii, jj] = S[:, ii].dot(Y[:, jj])
        tmpp = np.sum((S * Y), axis=0)
        Dp = np.diag(tmpp)
        Mp = (Dp + Lp + Lp.T)
        Minvp = np.linalg.inv(Mp)
        tmpp1 = np.matmul(Y.T, sk_TR)
        tmpp2 = np.matmul(Minvp, tmpp1)
        Bk_skTR = np.matmul(Y, tmpp2)
        pred = -(gard_k.T.dot(sk_TR) + 0.5 * sk_TR.T.dot(Bk_skTR))  # Predicted reduction
        # Take step
        if ared / pred > eta:
            xNew = xOld + sk_TR
            objFunOld = objFunNew
        else:
            xNew = xOld
        # Update trust region radius
        if ared / pred > 0.75:
            deltak = 2 * deltak
        elif ared / pred >= 0.1 and ared / pred <= 0.75:
            pass  # no need to change deltak
        elif ared / pred < 0.1:
            deltak = deltak * 0.5
        k += 1
        sess.run(dnn.ASSIGN_OP, feed_dict={dnn.updateVal: xNew})  # Assign updated weights
    # Save History; context manager closes the handle (previously leaked).
    with open("./_saved_log_files/S_LSR1.pkl", "wb") as fh:
        pickle.dump(HISTORY, fh)
    # pickle.dump( weights_SLSR1, open( "./_saved_log_files/S_LSR1_weights.pkl", "wb" ) ) # Save Weights in .pkl file
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,566 | guoguo90/SQN | refs/heads/master | /main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
import matplotlib.pyplot as plt
import pickle
from S_LSR1 import *
from S_LBFGS import *
from parameters import *
from network import *
from data_generation import *
import os.path
import sys
input1 = sys.argv[1]  # Solver selector from the command line: "SLSR1" or "SLBFGS".
# ==========================================================================
def main(opt=input1):
    """Call the selected solver with the selected parameters.

    Relies on the module-level globals (``cp``, ``X``, ``y``, ``dnn``,
    ``sess``, ``w_init``) set up below.  Note the default for ``opt`` is
    bound to ``input1`` (sys.argv[1]) at definition time.

    :param opt: Solver name, "SLSR1" or "SLBFGS"; anything else is a no-op.
    """
    if opt == "SLSR1":
        S_LSR1(w_init,X,y,cp.seed,cp.numIter,cp.mmr,cp.radius,cp.eps,cp.eta,cp.delta_init,cp.epsTR,cp.num_weights,dnn,sess)
    elif opt == "SLBFGS":
        S_LBFGS(w_init,X,y,cp.seed,cp.numIter,cp.mmr,
                cp.radius,cp.eps,cp.alpha_init,cp.cArmijo,cp.rhoArmijo,cp.num_weights,cp.init_sampling_SLBFGS,dnn,sess)
# Get the parameters
cp = parameters()
# Create the data
X,y = getData(cp.num_pts,cp.freq,cp.offset)
# Create network (pin the visible GPU before the session is created)
os.environ["CUDA_VISIBLE_DEVICES"] = cp.GPUnumber
sess = tf.InteractiveSession()
dnn = DNN(cp.sizeNet,cp.activation,cp.mmr)
# Set the initial point
np.random.seed(cp.seed)
w_init = np.random.randn(cp.num_weights,1)
# ==========================================================================
if __name__ == '__main__':
    """Run the selected solver."""
    main()
48,567 | guoguo90/SQN | refs/heads/master | /sampleSY.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
def sample_pairs_SY_SLSR1(X,y,num_weights,mmr,radius,eps,dnn,numHessEval,sess):
    """Sample (S, Y) curvature pairs for the S-LSR1 method.

    Draws ``mmr`` random directions of the given radius, computes their
    Hessian-vector products through the TF graph, and keeps only the pairs
    that satisfy the standard SR1 well-definedness safeguard
    |(y - B s)^T s| > eps * ||y - B s|| * ||s||.

    FIX: ``xrange`` (Python-2-only) replaced with ``range``.

    :param X, y: Data fed to the TF graph.
    :param num_weights: Problem dimension.
    :param mmr: Number of directions to sample.
    :param radius: Sampling radius.
    :param eps: SR1 acceptance tolerance.
    :param dnn: Network wrapper exposing ``Hvs`` and the placeholders.
    :param numHessEval: Running Hessian-evaluation counter (incremented once).
    :param sess: TensorFlow session.
    :return: (S, Y, counterSucc, numHessEval) with accepted pairs column-wise.
    """
    Stemp = radius*np.random.randn(num_weights,mmr)  # candidate directions
    Ytemp = np.squeeze(sess.run([dnn.Hvs], feed_dict={dnn.x: X, dnn.y:y, dnn.vecs: Stemp})).T
    numHessEval += 1
    S = np.zeros((num_weights,0))
    Y = np.zeros((num_weights,0))
    counterSucc = 0
    for idx in range(mmr):
        # Compact-form SR1 matrix from the pairs accepted so far: M = D + L + L^T.
        L = np.zeros((Y.shape[1],Y.shape[1]))
        for ii in range(Y.shape[1]):
            for jj in range(0,ii):
                L[ii,jj] = S[:,ii].dot(Y[:,jj])
        tmp = np.sum((S * Y),axis=0)
        D = np.diag(tmp)
        M = (D + L + L.T)
        Minv = np.linalg.inv(M)
        # Bk * s via the compact representation (zero when no pair accepted yet).
        tmp1 = np.matmul(Y.T,Stemp[:,idx])
        tmp2 = np.matmul(Minv,tmp1)
        Bksk = np.squeeze(np.matmul(Y,tmp2))
        yk_BkskDotsk = ( Ytemp[:,idx]- Bksk ).T.dot( Stemp[:,idx] )
        # SR1 safeguard: only accept numerically safe updates.
        if np.abs(np.squeeze(yk_BkskDotsk)) > (
                eps *(LA.norm(Ytemp[:,idx]- Bksk ) * LA.norm(Stemp[:,idx])) ):
            counterSucc += 1
            S = np.append(S,Stemp[:,idx].reshape(num_weights,1),axis = 1)
            Y = np.append(Y,Ytemp[:,idx].reshape(num_weights,1),axis=1)
    return S,Y,counterSucc,numHessEval
def sample_pairs_SY_SLBFGS(X,y,num_weights,mmr,radius,eps,dnn,numHessEval,sess):
    """Sample (S, Y) curvature pairs for the S-LBFGS method.

    Draws ``mmr`` random directions, computes Hessian-vector products, and
    keeps pairs that pass the curvature test s^T y > eps * ||s|| * ||y||.

    BUGFIX: ``gamma_k`` was previously unbound (UnboundLocalError at the
    ``return``) whenever no pair passed the curvature test; it is now
    initialized to 1.0 (identity initial-Hessian scaling) as a safe default.
    Also replaced Python-2-only ``xrange`` with ``range``.

    :return: (S, Y, counterSucc, numHessEval, gamma_k) where gamma_k is the
        LBFGS initial scaling s^T y / y^T y of the last accepted pair.
    """
    Stemp = radius*np.random.randn(num_weights,mmr)  # candidate directions
    Ytemp = np.squeeze(sess.run([dnn.Hvs], feed_dict={dnn.x: X, dnn.y:y, dnn.vecs: Stemp})).T
    numHessEval += 1
    S = np.zeros((num_weights,0))
    Y = np.zeros((num_weights,0))
    counterSucc = 0
    gamma_k = 1.0  # safe default when no pair is accepted
    for idx in range(mmr):
        sTy = Ytemp[:,idx].T.dot(Stemp[:,idx])
        if sTy > eps *(LA.norm(Stemp[:,idx])*LA.norm(Ytemp[:,idx])):
            gamma_k = np.squeeze((Stemp[:,idx]).T.dot(Ytemp[:,idx])/((Ytemp[:,idx]).T.dot(Ytemp[:,idx])))
            S = np.append(S,Stemp[:,idx].reshape(num_weights,1),axis = 1)
            Y = np.append(Y,Ytemp[:,idx].reshape(num_weights,1),axis=1)
            counterSucc += 1
    return S,Y,counterSucc,numHessEval,gamma_k
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,568 | guoguo90/SQN | refs/heads/master | /parameters.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
# ==========================================================================
class parameters :
    """Container for all problem, network, and algorithm parameters."""
    def __init__(self):
        """Return the setting of parameters."""
        #-----------------------------------------------
        #----------- Network Parameters ----------------
        #-----------------------------------------------
        #----------- inputs for SIN problem ------------
        self.freq = 8        # frequency of the sine curve
        self.offset = 0.8    # vertical separation between the two classes
        self.num_pts = 50    # points per class
        # sin(freq*xx)+offset
        # sin(freq*xx)-offset
        #----------- activation function ---------------
        # activation function can be selected here, the
        # possible inputs are "sigmoid", "ReLU" and "Softplus"
        self.activation="sigmoid"
        #---------------- network size -----------------
        # the size of network can be specified here; note that
        # it will be fully connected network, e.g. [2,2,2,2,2,2]
        # contains 6 layers with 2 nodes in every layer
        self.FC1 = 2
        self.FC2 = 2
        self.FC3 = 2
        self.FC4 = 2
        self.FC5 = 2
        self.FC6 = 2
        self.sizeNet =[self.FC1,self.FC2,self.FC3,self.FC4,self.FC5,self.FC6]
        # weight-matrix and bias sizes, layer by layer
        dimensionSet = [2*self.FC1, self.FC1, self.FC1*self.FC2, self.FC2,self.FC2*self.FC3,
                        self.FC3,self.FC3*self.FC4, self.FC4, self.FC4*self.FC5, self.FC5, self.FC5*self.FC6, self.FC6]
        self.num_weights = np.sum(dimensionSet) # dimension of the problem
        #-----------------------------------------------
        #-----------------------------------------------
        #-----------------------------------------------
        #----------- Algorithm Parameters --------------
        #-----------------------------------------------
        self.seed = 67 # random seed
        self.numIter = 1000 # maximum number of iterations
        self.mmr = 10 # memory length for S-LSR1, S-LBFGS
        self.radius = 1 # sampling radius for S-LSR1, S-LBFGS
        self.eps = 1e-8 # tolerance for updating quasi-Newton matrices
        self.eta = 1e-6 # tolerance for ared/pred reduction in TR
        self.delta_init = 1 # initial TR radius
        self.alpha_init = 1 # initial step length
        self.epsTR = 1e-10 # tolerance for CG_Steinhaug
        self.cArmijo = 1e-4 # Armijo sufficient decrease parameter
        self.rhoArmijo = .5 # Armijo backtracking factor
        self.init_sampling_SLBFGS = "off" # S-LBFGS sampling from first iteration
        #-----------------------------------------------
        #-----------------------------------------------
        #-----------------------------------------------
        #------------- Other Parameters ----------------
        #-----------------------------------------------
        self.GPUnumber = "0" # GPU ID
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,569 | guoguo90/SQN | refs/heads/master | /data_generation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
# ==========================================================================
def getData(num_pts = 50, freq = 8.0, offset = 0.8):
    """Generate the two-class sine-wave toy dataset.

    The classes are sampled from sin(freq*x)+offset (label 1) and
    sin(freq*x)-offset (label 0) at num_pts equispaced x-values in [0, 1).

    FIX: ``xrange`` (Python-2-only) replaced with ``range``; the returned
    values are unchanged.

    :param num_pts: Points per class.
    :param freq: Frequency of the sine wave.
    :param offset: Vertical offset separating the two classes.
    :return: (X, y) with X of shape (2*num_pts, 2) — column 0 the x-value,
        column 1 the curve value — and y one-hot of shape (2*num_pts, 2);
        the first num_pts rows belong to the positive class.
    """
    # x-coordinates: 0, 1/num_pts, ..., (num_pts-1)/num_pts
    xx = np.array(range(num_pts))*1.0/(num_pts+.0)
    # Create positive (xp) and negative (xn) classes
    xp = np.sin(freq*xx)+offset
    xn = np.sin(freq*xx)-offset
    # Concatenate the two arrays into list and reshape
    X = [ [xx.tolist()+xx.tolist()],[xp.tolist()+xn.tolist()]]
    X = np.reshape(np.array(X),[2,-1])
    # Create labels Y: num_pts ones followed by num_pts zeros
    Y = [1 for _ in range(num_pts)]
    Y = Y + [0 for _ in range(num_pts)]
    ns = len(Y)
    Y = np.array(Y)
    X = np.transpose(X)
    # One-hot encode the labels
    y = np.zeros([ns,2])
    for i in range(ns):
        y[i,Y[i]] = 1
    return X,y
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,570 | guoguo90/SQN | refs/heads/master | /network.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import tensorflow as tf
import numpy as np
import time
# ==========================================================================
def weight_variable(shape, std=0.1):
    """Create a float64 TF variable initialised from a truncated normal.

    :param shape: Shape of the variable to create.
    :param std: Standard deviation of the truncated-normal initialiser.
    :return: A ``tf.Variable`` of the given shape.
    """
    initial = tf.truncated_normal(shape, stddev=std, dtype=tf.float64)
    return tf.Variable(initial,dtype=tf.float64)
# ==========================================================================
class DNN:
    """Six fully connected layers for 2-feature / 2-class classification.

    All weights and biases live in ONE flat parameter vector ``params``;
    slices of it are reshaped into the per-layer matrices.  Keeping the
    model as a single vector makes the quasi-Newton updates (gradient,
    Hessian and Hessian-matrix products on one vector) easy to implement.

    Args (to __init__):
        hiddenSizes: list of the 5 hidden-layer widths.
        activation:  "sigmoid", "ReLU" or "Softplus".
        mmr:         memory size; column count of the Hessian-matrix
                     product placeholder.
    """
    def __init__(self,hiddenSizes,activation="sigmoid",mmr=10):
        # Placeholders for inputs (2 features) and one-hot labels (2 classes).
        x = tf.placeholder(tf.float64, shape=[None, 2])
        y_ = tf.placeholder(tf.float64, shape=[None, 2])
        # Layer widths: five hidden layers plus the fixed 2-unit output layer.
        FC1 = hiddenSizes[0]
        FC2 = hiddenSizes[1]
        FC3 = hiddenSizes[2]
        FC4 = hiddenSizes[3]
        FC5 = hiddenSizes[4]
        FC6 = 2
        # Flattened sizes of each weight matrix / bias vector, in order
        # (W1, b1, W2, b2, ..., W6, b6).
        sizes = [2*FC1, FC1, FC1*FC2, FC2,FC2*FC3, FC3,FC3*FC4, FC4, FC4*FC5, FC5, FC5*FC6, FC6]
        n = np.sum(sizes)
        # Single flat parameter vector; std scales with 1/n.
        params = weight_variable([n, 1],1.0/(n))
        uparam = tf.unstack(params,axis = 0)
        # Carve the flat vector into per-layer weights and biases.
        W1 = tf.reshape(tf.stack( uparam[0:sizes[0]] ), shape=[2,FC1])
        b1 = tf.reshape(tf.stack( uparam[sum(sizes[0:1]):sum(sizes[0:1])+sizes[1]] ), shape=[FC1])
        W2 = tf.reshape(tf.stack( uparam[sum(sizes[0:2]):sum(sizes[0:2])+sizes[2]] ), shape=[FC1, FC2])
        b2 = tf.reshape(tf.stack( uparam[sum(sizes[0:3]):sum(sizes[0:3])+sizes[3]] ), shape=[FC2])
        W3 = tf.reshape(tf.stack( uparam[sum(sizes[0:4]):sum(sizes[0:4])+sizes[4]] ), shape=[FC2, FC3])
        b3 = tf.reshape(tf.stack( uparam[sum(sizes[0:5]):sum(sizes[0:5])+sizes[5]] ), shape=[FC3])
        W4 = tf.reshape(tf.stack( uparam[sum(sizes[0:6]):sum(sizes[0:6])+sizes[6]] ), shape=[FC3, FC4])
        b4 = tf.reshape(tf.stack( uparam[sum(sizes[0:7]):sum(sizes[0:7])+sizes[7]] ), shape=[FC4])
        W5 = tf.reshape(tf.stack( uparam[sum(sizes[0:8]):sum(sizes[0:8])+sizes[8]] ), shape=[FC4, FC5])
        b5 = tf.reshape(tf.stack( uparam[sum(sizes[0:9]):sum(sizes[0:9])+sizes[9]] ), shape=[FC5])
        W6 = tf.reshape(tf.stack( uparam[sum(sizes[0:10]):sum(sizes[0:10])+sizes[10]] ), shape=[FC5, FC6])
        b6 = tf.reshape(tf.stack( uparam[sum(sizes[0:11]):sum(sizes[0:11])+sizes[11]] ), shape=[FC6])
        Ws = [W1,W2,W3,W4,W5,W6]
        bs = [b1,b2,b3,b4,b5,b6]
        # NOTE(review): an unrecognised ``activation`` string leaves acf
        # unbound and raises NameError below — confirm callers only pass
        # one of the three supported names.
        if activation=="sigmoid":
            acf = tf.nn.sigmoid
        if activation=="ReLU":
            acf = tf.nn.relu
        if activation=="Softplus":
            acf = tf.nn.softplus
        # Forward pass; the last layer is linear (logits).
        a1 = acf(tf.matmul(x, W1) + b1)
        a2 = acf(tf.matmul(a1, W2) + b2)
        a3 = acf(tf.matmul(a2, W3) + b3)
        a4 = acf(tf.matmul(a3, W4) + b4)
        a5 = acf(tf.matmul(a4, W5) + b5)
        a6 = (tf.matmul(a5, W6) + b6)
        #-----------------------------------------------
        #----------- Function, Gradient, Hessian, Accuracy and Other Operators --------------
        #-----------------------------------------------
        output = a6 # Output of network (logits)
        probdist = tf.nn.softmax(output) # Softmax of output layer
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output)) # Cross entropy loss
        correct_prediction = tf.equal(tf.argmax(a6, 1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Accuracy computation
        self.output= output
        self.probdist=probdist
        self.x = x
        self.y = y_
        self.Ws = Ws
        self.bs = bs
        self.cross_entropy = cross_entropy
        self.accuracy = accuracy
        self.correct_prediction = correct_prediction
        self.params = params
        self.updateVal = tf.placeholder(tf.float64, shape=[int(params.shape[0]),1]) # Placeholder for updating parameters
        self.updateOp = tf.assign_add(params, self.updateVal).op # Operator for ADDING updateVal to the parameters
        self.G = tf.gradients(cross_entropy,params) # Gradient computation
        self.H = tf.hessians(cross_entropy,params) # Hessian computation
        self.ASSIGN_OP = tf.assign(self.params, self.updateVal).op # Operator for OVERWRITING the parameters
        Gradient = self.G[0]
        self.vecs = tf.placeholder(dtype=tf.float64, shape=[int(self.params.shape[0]), mmr]) #Placeholder for the matrix multiplied by the Hessian
        self.Gv = tf.reshape(Gradient,shape=(1,-1))
        self.grad_vs =(tf.matmul(self.Gv,self.vecs))
        self.Hvs = tf.stack([tf.gradients(tm[0], params, stop_gradients=self.vecs) for tm in tf.unstack(self.grad_vs, axis=1) ] ) # Operator for Hessian-matrix product
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,571 | guoguo90/SQN | refs/heads/master | /S_LBFGS.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Albert Berahas, Majid Jahani, Martin Takáč
#
# All Rights Reserved.
#
# Authors: Albert Berahas, Majid Jahani, Martin Takáč
#
# Please cite:
#
# A. S. Berahas, M. Jahani, and M. Takáč, "Quasi-Newton Methods for
# Deep Learning: Forget the Past, Just Sample." (2019). Lehigh University.
# http://arxiv.org/abs/1901.09997
# ==========================================================================
import numpy as np
import pickle
import os.path
import os
import sys
import tensorflow as tf
import time
from util_func import *
from network import *
from data_generation import *
from sampleSY import *
# ==========================================================================
def S_LBFGS(w_init,X,y,seed,numIter,mmr,radius,eps,alpha_init,cArmijo,rhoArmijo,num_weights,init_sampling_SLBFGS,dnn,sess):
    """Sampled LBFGS method.

    Optimizes the network ``dnn`` on the full batch (X, y), logging
    per-iteration statistics to ./_saved_log_files/S_LBFGS.pkl.
    Terminates when the iteration cap is exceeded or training accuracy
    reaches 1.  (The Python-2-only ``print`` statements of the original
    were replaced by the portable function form.)
    """
    w = w_init
    sess.run(dnn.params.assign(w))  # Assign initial weights to parameters of the network
    np.random.seed(seed)  # Set random seed
    print(seed)
    numFunEval = 0  # Initialize counters (function values, gradients and Hessians)
    numGradEval = 0
    numHessEval = 0
    gamma_k = 1
    g_kTemp, objFunOldTemp = sess.run([dnn.G, [dnn.cross_entropy, dnn.accuracy]], feed_dict={dnn.x: X, dnn.y: y})
    numFunEval += 1
    numGradEval += 1
    objFunOld = objFunOldTemp[0]
    acc = objFunOldTemp[1]
    g_k = g_kTemp[0]
    norm_g = LA.norm(g_k)
    HISTORY = []
    weights_SLBFGS = []
    k = 0
    st = time.time()
    alpha = alpha_init
    while 1:
        weights_SLBFGS.append(sess.run(dnn.params))
        HISTORY.append([k, objFunOld, acc, norm_g, numFunEval, numGradEval, numHessEval,
                        numFunEval + numGradEval + numHessEval, time.time() - st, alpha])
        print(HISTORY[k])  # Print History array
        if k > numIter or acc == 1:  # Terminate if number of iterations > numIter or Accuracy = 1
            break
        if init_sampling_SLBFGS == "off" and k == 0:
            # First iteration with sampling disabled: plain gradient step.
            alpha = min(1, 1.0 / (np.linalg.norm(g_k, ord=1)))
            pk = g_k
        else:
            # Sample curvature pairs (S, Y) and build the search direction
            # with the classical L-BFGS two-loop recursion.
            S, Y, counterSucc, numHessEval, gamma_k = sample_pairs_SY_SLBFGS(X, y, num_weights, mmr, radius, eps, dnn, numHessEval, sess)
            pk = L_BFGS_two_loop_recursion(g_k, S, Y, k, mmr, gamma_k, num_weights)
            alpha = 2 * alpha  # change to 2*alpha
        mArmijo = -(pk.T.dot(g_k))
        x0 = sess.run(dnn.params)
        # Backtracking (Armijo) line search.
        while 1:
            # params is the updated variable by adding -alpha* pk to the previous one
            sess.run(dnn.updateOp, feed_dict={dnn.updateVal: -alpha * pk})
            objFunNew = sess.run(dnn.cross_entropy, feed_dict={dnn.x: X, dnn.y: y})
            numFunEval += 1
            if objFunOld + alpha * cArmijo * mArmijo < objFunNew:
                # Sufficient decrease failed: restore weights, shrink alpha.
                sess.run(dnn.ASSIGN_OP, feed_dict={dnn.updateVal: x0})
                alpha = alpha * rhoArmijo
                if alpha < 1e-25:
                    print("issue with Armijo")
                    break
            else:
                break
        objFunOld = objFunNew
        xNew, acc, g_k_newTemp = sess.run([dnn.params, dnn.accuracy, dnn.G], feed_dict={dnn.x: X, dnn.y: y})
        numGradEval += 1
        g_k = g_k_newTemp[0]
        norm_g = LA.norm(g_k)
        k += 1
        sess.run(dnn.ASSIGN_OP, feed_dict={dnn.updateVal: xNew})
    pickle.dump(HISTORY, open("./_saved_log_files/S_LBFGS.pkl", "wb"))  # Save History in .pkl file
    # pickle.dump( weights_SLBFGS, open( "./_saved_log_files/S_LBFGS_weights.pkl", "wb" ) ) # Save Weights in .pkl file
| {"/main.py": ["/S_LSR1.py", "/S_LBFGS.py", "/parameters.py", "/network.py", "/data_generation.py"]} |
48,591 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/conf/error_object.py | class ErrorObject():
def __init__(self, errorCode=400, errorMessage="Something went wrong"):
self.errorCode = errorCode
self.errorMessage = errorMessage
return
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,592 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/service_apis/keyword_helper.py | # importing stuff we need
import nltk
# before importing we need to download it
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def filter_sentence(sentence=None):
    """
    Remove English stop words from ``sentence``.

    Parameters: sentence - string sentence to be filtered
    Return: a list of words after removing the stopwords (empty list
            when the sentence is missing or empty)
    """
    if not sentence:
        # Guard: the default None would crash word_tokenize().
        return []
    # Build the stop-word set once, then keep only non-stop words.
    stop_words = set(stopwords.words("english"))
    result = [w for w in word_tokenize(sentence) if w not in stop_words]
    print(result)
    return result
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,593 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/core/sttext.py |
from speech_to_text.utils.exceptions import GenericCustomException
if __name__ == '__main__':
    # Manual smoke test: transcribe the bundled sample recording.
    # Open in binary mode (it is a .wav file) and close deterministically.
    # NOTE(review): get_test_from_speech is not imported in this module —
    # confirm where it is meant to come from.
    with open('../assets/male.wav', 'rb') as wav_file:
        print(get_test_from_speech(wav_file))
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,594 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/service_apis/keywords.py | from flask import request
from flask_restful import Resource
import pdb
import keyword_helper as helper
# pdb.set_trace()
class Keywords(Resource):
    """REST resource that extracts keywords from a posted sentence."""

    def post(self):
        payload = request.get_json()
        filtered = helper.filter_sentence(payload['sentence'])
        return {'keywords': filtered}
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,595 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/service_apis/ping.py | from flask import request
from flask_restful import Resource
# from speech_to_text.utils.resource import BaseResource
class Ping(Resource):
    """Health-check endpoint (no authentication required for GET)."""

    def get(self):
        # Python-2-only ``print expr`` statements replaced with the
        # portable function form.
        query_params = request.args
        print("*************", query_params)
        headers = request.headers
        print("--------------->", headers)
        return {"Success:": True}
    # Flag consumed by the auth layer: GET is open.
    get.authenticated = False

    def post(self):
        return "hello"
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,596 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/conf/service_app.py |
from flask import Flask
from flask_restful import Api
from logging import FileHandler, WARNING, ERROR, INFO
from speech_to_text.service_apis.ping import Ping
from speech_to_text.service_apis.speech_to_text import SpeechToText
from speech_to_text.service_apis.upload import Uplaod
from speech_to_text.service_apis.keywords import Keywords
app = Flask(__name__)

# Persist log records (INFO and above) to errorlog.txt.
file_handler = FileHandler('errorlog.txt')
file_handler.setLevel(INFO)
app.logger.addHandler(file_handler)

# App Global variables
app.SEND_SMS = False

# Mount all REST resources under this URL prefix.
api = Api(app, prefix='/GeoVideoApplication/')
api.add_resource(Ping, 'ping/')
api.add_resource(Uplaod, 'upload')  # NOTE: class name is spelled "Uplaod" in upload.py
api.add_resource(SpeechToText, 'test')
api.add_resource(Keywords, 'keywords')

if __name__ == '__main__':
    # Development server entry point.
    app.run(host='localhost', port=9999, debug=True)
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,597 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/service_apis/speech_to_text.py | import glob
import speech_recognition as sr
from flask import request
from flask_restful import Resource
# UPLOAD_FOLDER = 'F:\\uploadFolder\\' #os.path.join("F:", "uploadFolder")
# Module-level state shared by printit() across requests.
fileName = ""
files = ""
data = []  # accumulated transcript strings — NOTE(review): grows across calls
fdata = ""
fnamelist = []
speech_to_text = {}  # maps wav file name -> recognized text
UPLOAD_FOLDER = '/tmp/uploadFolder/'  # os.path.join("F:", "uploadFolder")
def printit(filterfile=""):
    """Transcribe every uploaded .wav whose name contains ``filterfile``.

    Writes the transcripts to ``<UPLOAD_FOLDER>/<filterfile>.txt`` and
    returns ``"<transcripts>@<file names>"`` (colon separated), or None
    when no .wav files have been uploaded yet.

    Fixes over the original: transcripts are accumulated in a LOCAL list
    (the module-level ``data`` list grew without bound across requests and
    re-wrote historical transcripts into every output file), globals are
    no longer clobbered, and the output file is closed via ``with``.
    """
    global speech_to_text  # per-process cache of file -> transcript
    strdata = ""
    fdata = ""
    text = ""
    transcripts = []  # local accumulator, reset on every call
    filenames = glob.glob(UPLOAD_FOLDER + "*.wav")
    if filenames:
        for fileName in filenames:
            # Keep only the base file name (paths use '/').
            files = fileName.split("/")[-1]
            if filterfile in files:
                fdata = fdata + files + ":"
                r = sr.Recognizer()
                with sr.AudioFile(UPLOAD_FOLDER + files) as source:
                    audio = r.record(source)
                try:
                    text = r.recognize_google(audio)
                    speech_to_text[files] = text
                except Exception as e:
                    # Recognition failed; ``text`` keeps its previous value
                    # (original behavior preserved).
                    print(e)
                transcripts.append(text + ":")
                strdata = strdata + text + ":"
        with open(UPLOAD_FOLDER + filterfile + ".txt", "w") as f:
            f.writelines(transcripts)
        return strdata + "@" + fdata
    else:
        print("file not uploaded")
class SpeechToText(Resource):
    """POST endpoint returning transcripts for the requested files."""

    def post(self):
        requested = request.args['files']
        return printit(requested)
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,598 | NuttyGeek/AW-ServerCode | refs/heads/master | /speech_to_text/service_apis/upload.py | import os
from flask import request
from flask_restful import Resource
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/tmp/uploadFolder' #os.path.join("F:", "uploadFolder")
class Uplaod(Resource):
    """File-upload endpoint.  (The misspelled class name is kept because
    service_app.py imports it under this exact name.)"""

    def post(self):
        """Save the multipart field named 'image' into UPLOAD_FOLDER."""
        # .get() avoids the 400 BadRequestKeyError raised by a plain
        # subscript when the field is missing; we report the error ourselves.
        file = request.files.get('image')
        if file:
            filename = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            return {"message": True}
        # Previously a missing/empty upload fell through without a clear
        # response; return an explicit client error instead.
        return {"message": False}, 400
| {"/speech_to_text/conf/service_app.py": ["/speech_to_text/service_apis/ping.py", "/speech_to_text/service_apis/speech_to_text.py", "/speech_to_text/service_apis/upload.py", "/speech_to_text/service_apis/keywords.py"]} |
48,603 | bruvio/flask-aws | refs/heads/master | /create_table.py | import os
import sqlite3
# Rebuild data.db from scratch so the demo starts from a known state.
try:
    os.remove("data.db")
except OSError:
    # Database did not exist yet; nothing to delete.  (The original bare
    # ``except`` also swallowed unrelated errors such as KeyboardInterrupt.)
    pass

connection = sqlite3.connect("data.db")
cursor = connection.cursor()

# id INTEGER PRIMARY KEY creates an auto-incrementing column, so we only
# have to specify username and password when creating a user.
create_table = "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username text, password text)"
cursor.execute(create_table)
# Parameterized insert (avoids SQL string building).
cursor.execute("INSERT INTO users VALUES (?, ?, ?)", (1, "bruno", "asdf"))

create_table = (
    "CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name text, price real)"
)
cursor.execute(create_table)
cursor.execute("INSERT INTO items VALUES (?, ?, ?)", (1, "test", 10.99))

connection.commit()
connection.close()
print("DONE")
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,604 | bruvio/flask-aws | refs/heads/master | /tests/test.py | import os
import sys
import unittest
import numpy as np
import requests
# sys.path.append("../")
topdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(topdir)
from myapp import app
def authenticate():
    """POST the demo credentials to /auth and return (status_code, token)."""
    url = "http://127.0.0.1:3000/auth"
    payload = '{\n\t"username": "bruno",\n\t"password": "asdf"\n}\n'
    headers = {"Content-Type": "application/json"}
    resp = requests.request("POST", url, headers=headers, data=payload)
    # The JWT is returned in the JSON body under "access_token".
    jwt_token = resp.json()["access_token"]
    return resp.status_code, jwt_token
class ProjectTests(unittest.TestCase):
    """End-to-end API tests.

    NOTE(review): most of these tests issue real HTTP requests to
    http://127.0.0.1:3000 and therefore require the service to be
    running with the seeded demo user ("bruno"/"asdf") — confirm the
    expected test environment.
    """

    ############################
    #### setup and teardown ####
    ############################

    # executed prior to each test
    def setUp(self):
        app.config["TESTING"] = True
        app.config["DEBUG"] = False
        self.app = app.test_client()
        self.assertEqual(app.debug, False)

    # executed after each test
    def tearDown(self):
        super(ProjectTests, self).tearDown()

    ###############
    #### tests ####
    ###############

    def test_main_page(self):
        # Root endpoint is exercised via the in-process Flask test client.
        response = self.app.get("/", follow_redirects=True)
        assert response.status_code == 200
        assert response.data == b"Hello, world!"

    def test_register(self):
        # Registering an already-existing user must be rejected with 400.
        url = "http://127.0.0.1:3000/register"
        payload = '{\n\t"username": "bruno",\n\t"password": "asdf"\n}\n'
        headers = {"Content-Type": "application/json"}
        response = requests.request("POST", url, headers=headers, data=payload)
        print(response.json()["message"])
        assert response.status_code == 400  # user already exists

    def test_auth(self):
        # Valid credentials yield a JWT and HTTP 200.
        status, dummy = authenticate()
        assert status == 200

    def test_post_resource_already_exists(self):
        status, token = authenticate()
        url = "http://127.0.0.1:3000/item/test"
        payload = '{\n\t"price": 10.99\n}'
        headers = {
            "Content-Type": "application/json",
            "Authorization": "JWT " + str(token),
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        # assert response.status_code == 400

    def test_post_resource_doesnot_exist(self):
        # Random suffix makes a (probably) fresh item name.
        status, token = authenticate()
        url = "http://127.0.0.1:3000/item/desk" + str(
            np.random.randint(low=0, high=100, size=1)
        )
        payload = '{\n\t"price": 10.99\n}'
        headers = {
            "Content-Type": "application/json",
            "Authorization": "JWT " + str(token),
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        assert response.status_code == 201

    def test_put_resource_doesnot_exist(self):
        # PUT creates the item when it does not exist yet.
        status, token = authenticate()
        url = "http://127.0.0.1:3000/item/test" + str(
            np.random.randint(low=0, high=100, size=1)
        )
        payload = '{\n\t"price": 10.99\n}'
        headers = {
            "Content-Type": "application/json",
            "Authorization": "JWT " + str(token),
        }
        response = requests.request("PUT", url, headers=headers, data=payload)
        assert response.status_code == 201

    def test_put_resource_exists(self):
        # PUT updates the price of an existing item.
        status, token = authenticate()
        url = "http://127.0.0.1:3000/item/desk" + str(
            np.random.randint(low=0, high=100, size=1)
        )
        payload = '{\n\t"price": 212.99\n}'
        headers = {
            "Content-Type": "application/json",
            "Authorization": "JWT " + str(token),
        }
        response = requests.request("PUT", url, headers=headers, data=payload)
        assert response.status_code == 201

    def test_delete(self):
        status, token = authenticate()
        url = "http://127.0.0.1:3000/item/test"
        payload = {}
        headers = {
            "Authorization": "JWT " + str(token),
        }
        response = requests.request("DELETE", url, headers=headers, data=payload)
        assert response.json()["message"] == "item deleted"

    def test_items(self):
        # Listing items only checks that the request round-trips.
        status, token = authenticate()
        url = "http://127.0.0.1:3000/items"
        payload = {}
        headers = {
            "Authorization": "JWT " + str(token),
        }
        response = requests.request("GET", url, headers=headers, data=payload)
        print(response.text.encode("utf8"))
if __name__ == "__main__":
    import xmlrunner

    # Run the suite with the XML-report runner.  The second, bare
    # unittest.main() call that used to follow was unreachable:
    # unittest.main() raises SystemExit when the run finishes.
    runner = xmlrunner.XMLTestRunner(output="tests/test-reports")
    unittest.main(testRunner=runner)
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,605 | bruvio/flask-aws | refs/heads/master | /db.py | from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy instance: maps model objects to rows in the database.
# It is bound to the Flask app later via db.init_app(app), so every model
# module can import it without a circular dependency.
db = SQLAlchemy()
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,606 | bruvio/flask-aws | refs/heads/master | /resources/store.py | from flask_jwt import jwt_required
from flask_restful import Resource, reqparse
from models.store import StoreModel
from db import db
class Store(Resource):
    """REST resource for a single store, keyed by name."""

    @jwt_required()  # we have to authenticate before we run the get method
    def get(self, name):
        """Return the store as JSON, 404 if absent, 500 on DB failure."""
        try:
            store = StoreModel.find_by_name(name)
        except Exception:
            # Fixed: the original caught a bare ``except`` and reported
            # the copy-pasted "inserting the item" message during a lookup.
            return (
                {"message": "an Error has occurred looking up the store"},
                500,
            )  # internal server error
        if store:
            return store.json()
        return {"message": "Store not found"}, 404

    @jwt_required()  # we have to authenticate before we run the post method
    def post(self, name):
        """Create the store; 400 if the name is already taken."""
        # error first approach
        if StoreModel.find_by_name(name):
            return (
                {"message": "an store with name '{}' already exists".format(name)},
                400,
            )  # bad request
        # once errors are cleared we deal with what we want to do
        store = StoreModel(name)
        try:
            store.save_to_db()
        except Exception:
            return {"message": "an Error has occurred inserting the item"}, 500
        return store.json(), 201

    @jwt_required()  # we have to authenticate before we run the delete method
    def delete(self, name):
        """Delete the store if present."""
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()
            return {"message": "store deleted"}, 200
        return {"message": "store not found."}, 404
class StoreList(Resource):
    """REST resource listing every store with its items."""

    def get(self):
        all_stores = StoreModel.query.all()
        return {"stores": [s.json() for s in all_stores]}, 200
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,607 | bruvio/flask-aws | refs/heads/master | /security_class.py | from models.user import UserModel
from werkzeug.security import safe_str_cmp
def authenticate(username, password):
    """Flask-JWT authentication handler.

    Returns the UserModel when ``username`` exists and ``password``
    matches (constant-time comparison), otherwise None.
    """
    # stdlib replacement for werkzeug.security.safe_str_cmp, which was
    # deprecated and removed in Werkzeug 2.1; encoding keeps non-ASCII
    # passwords working, as safe_str_cmp did.
    import hmac

    user = UserModel.find_by_username(
        username
    )  # None when there is no user with that username
    if user and hmac.compare_digest(
        user.password.encode("utf-8"), password.encode("utf-8")
    ):
        return user
def identity(payload):
    """Flask-JWT identity handler: resolve the token payload to a user."""
    return UserModel.find_by_id(payload["identity"])
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,608 | bruvio/flask-aws | refs/heads/master | /resources/item.py | from flask_jwt import jwt_required
from flask_restful import Resource, reqparse
from models.item import ItemModel
from db import db
class Item(Resource):
    """REST resource for a single item, keyed by name."""

    # Shared request parser: validates the JSON payload of POST/PUT.
    parser = reqparse.RequestParser()
    parser.add_argument(
        "price", type=float, required=True, help="price field cannot be left blank"
    )
    parser.add_argument(
        "store_id", type=int, required=True, help="Every item need a store id"
    )

    @jwt_required()  # we have to authenticate before we run the get method
    def get(self, name):
        """Return the item as JSON, 404 if absent, 500 on DB failure."""
        try:
            item = ItemModel.find_by_name(name)
        except Exception:
            # Fixed: the original caught a bare ``except`` and reported
            # the copy-pasted "inserting the item" message during a lookup.
            return (
                {"message": "an Error has occurred looking up the item"},
                500,
            )  # internal server error
        if item:
            return item.json()
        return {"message": "Item not found"}, 404

    @jwt_required()  # we have to authenticate before we run the post method
    def post(self, name):
        """Create the item; 400 if it already exists."""
        # error first approach
        if ItemModel.find_by_name(name):
            return (
                {"message": "an item with name '{}' already exists".format(name)},
                400,
            )  # bad request
        # Parse the validated payload (price, store_id) from the request body.
        data = Item.parser.parse_args()
        item = ItemModel(name, **data)
        try:
            item.save_to_db()
        except Exception:
            return {"message": "an Error has occurred inserting the item"}, 500
        return item.json(), 201

    @jwt_required()  # we have to authenticate before we run the delete method
    def delete(self, name):
        """Delete the item if present."""
        item = ItemModel.find_by_name(name)
        if item:
            item.delete_from_db()
            return {"message": "item deleted"}, 200
        return {"message": "Item not found."}, 404

    @jwt_required()  # we have to authenticate before we run the put method
    def put(self, name):
        """Update the item's price, creating the item when needed."""
        data = Item.parser.parse_args()
        item = ItemModel.find_by_name(name)
        if item:
            item.price = data["price"]
        else:
            item = ItemModel(name, **data)
        try:
            item.save_to_db()
        except Exception:
            return {"message": "an Error has occurred inserting the item"}, 500
        return item.json(), 201
class ItemList(Resource):
    """REST resource listing every item."""

    TABLE_NAME = "items"

    def get(self):
        every_item = ItemModel.query.all()
        return {"items": [it.json() for it in every_item]}, 200
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,609 | bruvio/flask-aws | refs/heads/master | /application.py | from flask import Flask
from flask_jwt import JWT
from flask_restful import Api
from security_class import authenticate, identity
from resources.user import UserRegister
from resources.item import Item, ItemList
from resources.store import Store, StoreList
import os
from db import db
from datetime import timedelta
# import unittest
application = Flask(__name__)
# application.config[
# "SQLALCHEMY_DATABASE_URI"
# ] = os.environ.get('DATABASE_URL','sqlite:///data.db')# # where to find the database
application.config[
"SQLALCHEMY_DATABASE_URI"
] = "sqlite:///data.db" # where to find the database
application.config[
"SQLALCHEMY_TRACK_MODIFICATIONS"
] = False # sql alchemy has its own modification tracker
application.secret_key = "br1"
api = Api(application)
# config JWT to expire within half an hour
application.config["JWT_EXPIRATION_DELTA"] = timedelta(seconds=1800)
@application.before_first_request
def create_tables():
from models.user import UserModel
from models.item import ItemModel
db.create_all() # sql alchemy creates the tables that it sees and this works through imports
admin = UserModel("bruno", "asdf")
test = ItemModel("test", "10.99",'1')
desk = ItemModel("desk", "11.99",'1')
db.session.add(test)
db.session.add(desk)
db.session.add(admin)
db.session.commit()
jwt = JWT(application, authenticate, identity) # allows authentication of users /auth
# this will crate a new endpoint /auth
# when we run this endpoint this will create a username and password and send it over
# if username and password match with the one stored
# in postman we created the tests for the endpoint which are going to help
# in the design of the api
# we have created 5 endpoints
# get items - will give a list of the items
# get item<name> - will give the item uniquely identified by its name
# del item<name> - will delete the item uniquely identified by its name
# post item<name> - will create a new item identified by its name and it will fail if there is already an item with that name
# put item<name> - will create an item uniquely identified if the item does not exists. if the item exist it will update it.
# as per the endpoints we have created we need two resources:
# a list of items
# an item
api.add_resource(Store, "/store/<string:name>")
api.add_resource(StoreList, "/stores")
api.add_resource(Item, "/item/<string:name>")
api.add_resource(ItemList, "/items")
api.add_resource(
UserRegister, "/register"
) # a post request to register will call the post method
@application.route("/") # we are specifying the endopoint ##'http://www.google.com
def home():
return "Hello, world!"
db.init_app(application)
application.run(host='0.0.0.0',port=80)
if __name__ == "__main__":
db.init_app(application)
application.run(port=80)
# application.run(port=3000, debug=True) # enable debug
#application.run()
# if __name__ == "__main__":
# ############# Add these lines #############
# import xmlrunner
#
# runner = xmlrunner.XMLTestRunner(output="test-reports")
# unittest.main(testRunner=runner)
# ###########################################
# unittest.main()
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,610 | bruvio/flask-aws | refs/heads/master | /resources/user.py | # will define a user object
import sqlite3
from flask_restful import Resource, reqparse
from models.user import UserModel
# so this class can interact with sql lite
class UserRegister(Resource):
    """Resource handling new-user registration via POST /register."""

    # Validates the incoming JSON payload before post() touches it.
    parser = reqparse.RequestParser()
    parser.add_argument(
        "username", type=str, required=True, help="name field cannot be left blank"
    )
    parser.add_argument(
        "password", type=str, required=True, help="password field cannot be left blank"
    )

    def post(self):
        """Create the user unless the username is already taken."""
        data = UserRegister.parser.parse_args()
        # Reject duplicates before touching the database.
        if UserModel.find_by_username(data["username"]):
            return {"message": "User already registered "}, 400
        new_user = UserModel(**data)
        new_user.save_to_db()
        return {"message": "User created successfully"}, 201  # resource created
48,611 | bruvio/flask-aws | refs/heads/master | /models/store.py | from db import db
class StoreModel(db.Model):
    """SQLAlchemy model for a store; each store owns many items."""

    __tablename__ = "stores"

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))  # max name size is 80 char

    # One-to-many relationship to ItemModel.  lazy="dynamic" keeps
    # ``items`` as a query builder instead of instantiating an object per
    # item up front: store creation stays cheap, at the cost of a slower
    # json() (which must run the query).
    items = db.relationship("ItemModel", lazy="dynamic")

    def __init__(self, name):
        self.name = name

    def json(self):
        """Serialize the store together with all of its items."""
        return {
            "name": self.name,
            "items": [item.json() for item in self.items.all()],
        }  # .all() executes the dynamic query and fetches every item row

    @classmethod
    def find_by_name(cls, name):
        """Return the first store named ``name`` or None.

        Equivalent to: SELECT * FROM stores WHERE name = ? LIMIT 1.
        """
        return cls.query.filter_by(
            name=name
        ).first()

    def save_to_db(self):
        """Insert (or update) this row and commit."""
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this row and commit."""
        db.session.delete(self)
        db.session.commit()
| {"/resources/store.py": ["/models/store.py", "/db.py"], "/resources/item.py": ["/db.py"], "/application.py": ["/security_class.py", "/resources/user.py", "/resources/item.py", "/resources/store.py", "/db.py"], "/models/store.py": ["/db.py"]} |
48,612 | cpfair/pebble-qibla-www | refs/heads/master | /www.py | from flask import Flask, redirect, request, render_template, jsonify
from models import User
from timetable import TimetableResolver
from timeline import Timeline
from datetime import datetime
import logging
import json
from raven.contrib.flask import Sentry
app = Flask(__name__)
# Forward ERROR-level log records to Sentry.
sentry = Sentry(app, logging=True, level=logging.ERROR)
@app.route('/settings/<user_token>', methods=["GET", "POST"])
def settings(user_token):
    """Settings page; the timeline service has been shut down, so every
    token gets the same static notice."""
    return render_template('timeline_shutdown.html')
@app.route('/')
def index():
    """Landing page.

    NOTE(review): the redirect below was unreachable dead code — the early
    return short-circuits it.  It is preserved as a comment in case the
    appstore redirect was the intended behaviour.
    """
    return "marhaba!"
    # return redirect('https://apps.getpebble.com/applications/53ab84141d576ea3c30000d6')
if __name__ == '__main__':
    # Development server only; listens on all interfaces.
    app.run(debug=True, host="0.0.0.0")
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,613 | cpfair/pebble-qibla-www | refs/heads/master | /timetables/base.py | class Timetable:
@classmethod
def CacheKey(cls, location, date):
# Should return a cache key for Times()
raise NotImplemented()
@classmethod
def Times(cls, location, date):
# Should return a list of (location_name_option, date, times_dict) tuples
# - geoname_option, if the timetable is for a fixed location.
# - date is a TZ-naive date
# - times_dict is in the style of PrayTimes' return.
# i.e. fractional hours since midnight for each time, in UTC.
raise NotImplemented()
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,614 | cpfair/pebble-qibla-www | refs/heads/master | /timetables/london_unified.py | from .base import Timetable
from keys import LONDON_UNIFIED_KEY
from datetime import datetime
from pytz import timezone, utc
import requests
class LondonUnified(Timetable):
    """Timetable backed by the London Unified Prayer Times web API."""

    @classmethod
    def CacheKey(cls, location, date):
        # Times are city-wide, so the location contributes nothing to the key.
        return ""

    @classmethod
    def _mangleTime(cls, time_str, date, aft, maybe_morn):
        """Parse an "HH:MM" string and return fractional UTC hours since
        midnight of `date`. `aft` marks 12-hour-style values that belong to
        the afternoon; `maybe_morn` relaxes the noon cutoff."""
        parsed = datetime.strptime(time_str, "%H:%M").time()
        if aft:
            # A cutoff of 10 tolerates an exceptionally early dhuhr.
            cutoff = 10 if maybe_morn else 12
            if parsed.hour < cutoff:
                parsed = parsed.replace(hour=parsed.hour + 12)
        local_dt = timezone("Europe/London").localize(datetime.combine(date, parsed))
        naive_utc = local_dt.astimezone(utc).replace(tzinfo=None)
        midnight = naive_utc.replace(hour=0, minute=0, second=0, microsecond=0)
        return (naive_utc - midnight).total_seconds() / 3600

    @classmethod
    def Times(cls, location, date):
        """Fetch the day's times from the API as a single-element tuple of
        ("London", date, times_dict)."""
        response = requests.get(
            "http://www.londonprayertimes.com/api/times/",
            params={
                "key": LONDON_UNIFIED_KEY,
                "format": "json",
                "date": date.strftime("%Y-%m-%d"),
            },
        )
        table = response.json()
        times = {
            "fajr": cls._mangleTime(table["fajr"], date, False, False),
            "sunrise": cls._mangleTime(table["sunrise"], date, False, False),
            "dhuhr": cls._mangleTime(table["dhuhr"], date, True, True),
            "asr": cls._mangleTime(table["asr"], date, True, False),
            # NB: the upstream API spells this key "magrib".
            "maghrib": cls._mangleTime(table["magrib"], date, True, False),
            "isha": cls._mangleTime(table["isha"], date, True, False),
        }
        return (("London", date, times),)
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,615 | cpfair/pebble-qibla-www | refs/heads/master | /timetables/singapore.py | from .base import Timetable
import datetime
from pytz import timezone, utc
import requests
import PyPDF2
import re
import io
# Year -> URL of MUIS's official annual prayer-timetable PDF.
# A year missing from this map raises KeyError in Singapore.Times().
timetable_pdfs = {
    2016: "http://www.muis.gov.sg/documents/Resource_Centre/Prayer_Timetable_2016.pdf",
    2017: "http://www.muis.gov.sg/documents/Resource_Centre/Prayer%20Timetable%202017.pdf",
    2018: "https://www.muis.gov.sg/-/media/Files/Corporate-Site/Prayer-Timetable-2018.pdf"
}
class Singapore(Timetable):
    """Timetable that scrapes MUIS's annual prayer-time PDF for Singapore."""
    @classmethod
    def CacheKey(cls, location, date):
        # Times are island-wide, so the location contributes nothing to the key.
        return ""
    @classmethod
    def _mangleTime(cls, time_str, date, aft):
        """Convert an "H MM" string from the PDF into fractional UTC hours
        since midnight of `date`. `aft` marks 12-hour-style values that
        belong to the afternoon."""
        time = datetime.datetime.strptime(time_str.replace("\n", ""), "%H %M").time()
        if aft:
            if time.hour < 12:
                time = time.replace(hour=time.hour + 12)
        dt = timezone("Asia/Singapore").localize(datetime.datetime.combine(date, time))
        utc_dt = dt.astimezone(utc).replace(tzinfo=None)
        since_midnight = utc_dt - datetime.datetime.combine(date, datetime.datetime.min.time())
        return since_midnight.total_seconds() / 3600
    @classmethod
    def Times(cls, location, date):
        """Download and parse the whole year's PDF, returning a
        ("Singapore", date, times_dict) tuple for every day found.
        Raises KeyError for years without a known PDF and AssertionError
        if any day of the year is missing from the parse."""
        time_table_pdf_req = requests.get(timetable_pdfs[date.year])
        time_table_pdf = PyPDF2.PdfFileReader(io.BytesIO(time_table_pdf_req.content))
        results = []
        for page in time_table_pdf.pages:
            text = page.extractText()
            # Each row: d/m/yyyy, a weekday word, then six "H MM" times.
            # PDF text extraction may interleave newlines, hence the \n? gaps.
            for time_row in re.finditer(r"(?P<date>\d+\n?/\n?\d+\n?/\n?\d{4})\s+\w+\s+(?P<fajr>\d{1,2}\s+\d\n?\d)\s+(?P<sunrise>\d{1,2}\s+\d\n?\d)\s+(?P<dhuhr>\d{1,2}\s+\d\n?\d)\s+(?P<asr>\d{1,2}\s+\d\n?\d)\s+(?P<magrib>\d{1,2}\s+\d\n?\d)\s+(?P<isha>\d{1,2}\s+\d\n?\d)", text):
                date_parts = list(int(x.strip()) for x in time_row.group("date").split("/"))
                # NOTE: this rebinds the `date` parameter with each row's own
                # day — the method deliberately returns the entire year.
                date = datetime.date(day=date_parts[0], month=date_parts[1], year=date_parts[2])
                results.append(("Singapore", date, {
                    "fajr": cls._mangleTime(time_row.group("fajr"), date, False),
                    "sunrise": cls._mangleTime(time_row.group("sunrise"), date, False),
                    "dhuhr": cls._mangleTime(time_row.group("dhuhr"), date, True),
                    "asr": cls._mangleTime(time_row.group("asr"), date, True),
                    "maghrib": cls._mangleTime(time_row.group("magrib"), date, True),
                    "isha": cls._mangleTime(time_row.group("isha"), date, True)
                }))
        # Check nothing is missing for the year...
        missing_data = False
        last_date = None
        for result in sorted(results, key=lambda x: x[1]):
            if last_date:
                if result[1] != last_date + datetime.timedelta(days=1):
                    missing_data = True
                    print("Skip %s -> %s" % (last_date, result[1]))
            last_date = result[1]
        # NOTE(review): `assert` is stripped under python -O; consider raising.
        assert not missing_data
        assert len(results) in (365, 366)
        return results
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,616 | cpfair/pebble-qibla-www | refs/heads/master | /timetable.py | from praytimes import PrayTimes
from timetables.london_unified import LondonUnified
from timetables.malaysia import Malaysia
from timetables.singapore import Singapore
from models import TimetableCachedTimes
import datetime
class TimetableResolver:
    """Resolves prayer times either from a dedicated per-region timetable
    (remote API / PDF scrape, cached) or via PrayTimes calculation."""
    # Region name -> dedicated timetable class. Any other method name is
    # treated as a PrayTimes calculation method.
    _resolvers = {
        "London": LondonUnified,
        "Malaysia": Malaysia,
        "Singapore": Singapore
    }
    # In-process cache: cache key -> (location_geoname, times dict).
    _cache = {}
    @classmethod
    def Methods(cls):
        # All supported method names: calculated methods + dedicated regions.
        return list(PrayTimes.methods.keys()) + list(cls._resolvers.keys())
    @classmethod
    def AsrSettingAvailable(cls, method):
        # The asr juristic setting only applies to calculated methods.
        if method in PrayTimes.methods.keys():
            return True
        else:
            return False
    @classmethod
    def ResolveLocationGeoname(cls, method, config, location):
        # Cheap.
        return cls.Resolve(method, config, location, datetime.datetime.now().date())[0]
    @classmethod
    def Resolve(cls, method, config, location, date):
        """Return (location_geoname_or_None, times_dict) for the given day.

        Dedicated resolvers are assumed costly (remote calls), so results are
        cached both in memory and in MongoDB (TimetableCachedTimes)."""
        def buildCacheKey(loc, date):
            return "%s:%s:%s" % (method, resolver.CacheKey(loc, date), date.strftime("%Y-%m-%d"))
        if method in TimetableResolver._resolvers:
            # Dedicated resolver, vs. calculation.
            # We assume this lookup is costly (calling a remote API, and cache it).
            resolver = TimetableResolver._resolvers[method]
            query_cache_key = buildCacheKey(location, date)
            if query_cache_key in TimetableResolver._cache:
                return TimetableResolver._cache[query_cache_key]
            try:
                cache_obj = TimetableCachedTimes.objects.get(key=query_cache_key)
                TimetableResolver._cache[query_cache_key] = (cache_obj.location_geoname, cache_obj.times)
            except TimetableCachedTimes.DoesNotExist:
                multi_day_times = resolver.Times(location, date)
                # The resolver returns a list of (location, date, timedict) tuples.
                # Obviously the location shouldn't ever change over a range, but oh well, we're storing it discretely anyway.
                # NOTE: the loop rebinds `date`, so each row is keyed by its own day.
                for location_geoname, date, times in multi_day_times:
                    day_cache_key = buildCacheKey(location, date)
                    TimetableResolver._cache[day_cache_key] = (location_geoname, times)
                    TimetableCachedTimes.objects(key=day_cache_key).update(key=day_cache_key, location_geoname=location_geoname, times=times, upsert=True)
            return TimetableResolver._cache[query_cache_key]
        else:
            # Calculated method: cheap, no caching needed.
            pt = PrayTimes()
            pt.setMethod(method)
            pt.adjust({"asr": config["asr"]})
            # Timezone offset 0 -> times come back as fractional UTC hours.
            return None, pt.getTimes(date, location, 0, format="Float")
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,617 | cpfair/pebble-qibla-www | refs/heads/master | /models.py | import mongoengine as me
import os
class User(me.Document):
    """A Pebble app user: tokens, location, and sparse config overrides.

    Only non-default config values are stored (in _sparse_config); the
    `config` property exposes the full effective configuration.
    """
    DEFAULT_CONFIG = {
        # These match up with keys in praytimes.py
        "method": "ISNA",
        "asr": "Standard",
        # These don't
        "prayer_names": "standard"
    }
    user_token = me.StringField()
    timeline_token = me.StringField()
    location = me.PointField()
    location_geoname = me.StringField()
    tz_offset = me.IntField()
    created_at = me.DateTimeField()
    subscribed_at = me.DateTimeField()
    # It melted down when I tried name the db field "config"
    # Not sure what was up
    _sparse_config = me.DictField(db_field="sparse_config")
    def geocode(self):
        """Reverse-geocode self.location into self.location_geoname via the
        GeoNames API. Does not save; the caller must persist the change."""
        import requests
        res = requests.get('http://api.geonames.org/findNearbyPlaceNameJSON', params={'lat': self.location[1], 'lng': self.location[0], 'cities': 'cities1000', 'maxRows': 1, 'username': os.environ.get('GEONAMES_USERNAME', 'demo')})
        for place in res.json()["geonames"]:
            self.location_geoname = place["name"]
    @property
    def config(self):
        """Full effective config: defaults overlaid with sparse overrides.
        Mutations of the returned dict are folded back on save()."""
        if not hasattr(self, "_config_inst"):
            self._config_inst = dict(self.DEFAULT_CONFIG)
            self._config_inst.update(self._sparse_config)
        return self._config_inst
    def save(self, *args, **kwargs):
        """Fold config changes back into _sparse_config, then persist.

        BUG FIX: now accepts and forwards mongoengine Document.save()
        arguments (e.g. validate=, force_insert=), which the previous
        override silently dropped. Bare user.save() calls are unaffected.
        """
        # Paste _config_inst back into _sparse_config if reqd.
        if hasattr(self, "_config_inst"):
            # Transfer updated keys if not default
            for k, v in self._config_inst.items():
                if self.DEFAULT_CONFIG[k] != v:
                    self._sparse_config[k] = v
                elif k in self._sparse_config:
                    del self._sparse_config[k]
            # Remove deleted keys
            for k, v in self.DEFAULT_CONFIG.items():
                if k not in self._config_inst and k in self._sparse_config:
                    del self._sparse_config[k]
        return super(User, self).save(*args, **kwargs)
class TimetableCachedTimes(me.Document):
    """Persistent cache of resolved timetable days, looked up by string key
    (written by TimetableResolver.Resolve)."""
    key = me.StringField()
    location_geoname = me.StringField()
    times = me.DictField()
    meta = {
        'indexes': [
            'key'
        ]
    }
# Connect at import time: use MONGOLAB_URI when deployed, otherwise a
# local "qibla" database for development.
MONGO_URI = os.environ.get('MONGOLAB_URI', None)
MONGODB_SETTINGS = {}  # NOTE(review): appears unused here — confirm before removing
if not MONGO_URI:
    me.connect('qibla')
else:
    # The database name is the last path segment of the URI.
    me.connect(MONGO_URI.split("/")[-1], host=MONGO_URI)
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,618 | cpfair/pebble-qibla-www | refs/heads/master | /cron.py | from models import User
from timeline import Timeline
from threading import Event, Lock
# Normally this creates all futures up front
# ...which soaks up a lot of RAM
# So, instead, we create this many initially, and chain new futures off completion callbacks
max_outstanding_futures = 100
future_generator_lock = Lock()
exhausted_future_generator = Event()
# Run-wide counters, mutated from executor callback threads.
user_count = 0
successful_pin_count = 0
failed_pin_count = 0
def pin_future_generator():
    # Lazily yield one pin-push future at a time across all timeline-enabled
    # users, counting users as a side effect.
    global user_count
    for user in User.objects(timeline_token__exists=True):
        user_count += 1
        yield from Timeline.push_pins_for_user(user)
def pin_future_done(future):
    """Completion callback: tally the finished future's result, then chain
    the next future off the generator (keeping concurrency bounded).

    Cleanup: the previous `global` statement also declared an
    `outstanding_futures` name that was never defined or used; it has been
    removed. The chained future now gets its own name instead of rebinding
    the callback's parameter.
    """
    global successful_pin_count, failed_pin_count
    exc = future.exception()
    if exc:
        failed_pin_count += 1
        print(exc)
    else:
        successful_pin_count += 1
    # Start another future, if any are available
    try:
        with future_generator_lock:
            next_future = next(pin_iter)
        next_future.add_done_callback(pin_future_done)
    except StopIteration:
        exhausted_future_generator.set()
pin_iter = pin_future_generator()
# Prime the pipeline with the initial batch; each completion callback then
# chains one more future, capping concurrent futures (and therefore RAM).
try:
    for x in range(max_outstanding_futures):
        with future_generator_lock:
            future = next(pin_iter)
        future.add_done_callback(pin_future_done)
except StopIteration:
    exhausted_future_generator.set()
# Block until the generator is drained, then flush the thread pool.
exhausted_future_generator.wait()
Timeline.executor.shutdown()
print("%d pins pushed, %d failed for %d users" % (successful_pin_count, failed_pin_count, user_count))
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,619 | cpfair/pebble-qibla-www | refs/heads/master | /timeline.py | from timetable import TimetableResolver
from datetime import date, time, timedelta, datetime
from collections import defaultdict
import concurrent.futures
import threading
import requests
import pytz
import json
class Timeline:
    """Pushes/deletes per-prayer pins against the Pebble timeline web API.

    NOTE: methods below take no `self`/`cls`; they are always invoked
    through the class (Timeline.foo(...)), which works as plain functions
    in Python 3.
    """
    # Display names for each prayer, keyed by the user's "prayer_names" config.
    PRAYER_NAMES = {
        "standard": {
            "fajr": "Fajr",
            "sunrise": "Sunrise",
            "dhuhr": "Dhuhr",
            "asr": "Asr",
            "maghrib": "Maghrib",
            "isha": "Isha"
        },
        "arabic": {
            "fajr": "الفجر",
            "sunrise": "الشروق",
            "dhuhr": "الظهر",
            "asr": "العصر",
            "maghrib": "المغرب",
            "isha": "العشاء"
        },
        "turkish": {
            "fajr": "İmsak",
            "sunrise": "Güneş",
            "dhuhr": "Öğle",
            "asr": "İkindi",
            "maghrib": "Akşam",
            "isha": "Yatsı"
        }
    }
    # Sunrise is deliberately excluded from pin pushes.
    TIMES_TO_PUSH = ["fajr", "dhuhr", "asr", "maghrib", "isha"]
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=20)
    # I'm not sure if the ThreadPoolExecutor ever shuts down threads, meaning we might need to trim this dict.
    executor_http_sessions = defaultdict(lambda: requests.Session())
    def push_pins_for_user(user, sync=False, clear=True):
        # Queue pin deletes (optional) and pushes for a user. Returns the
        # list of pending futures, or waits for them all when sync=True.
        if not user.timeline_token:
            # They're not timeline-enabled
            return []
        pending_pins = []
        if clear:
            for x in range(-2, 3):
                pending_pins += Timeline._delete_pins_for_date(user, date.today() + timedelta(days=x))
        # Push pins for yesterday, today, tomorrow, and the day after
        # (20 total - just to avoid timezone worries)
        for x in range(-1, 3):
            pending_pins += Timeline._push_pins_for_date(user, date.today() + timedelta(days=x))
        if sync:
            # Wait until all our calls clear
            concurrent.futures.wait(pending_pins)
        else:
            return pending_pins
    def _push_pins_for_date(user, date):
        # Generator of futures: one pin push per prayer for the given day.
        loc = user.location
        if hasattr(loc, "keys"):
            loc = loc['coordinates']
        loc = loc[::-1] # From the database, it's lon/lat
        geoname_option, times = TimetableResolver.Resolve(user.config["method"], user.config, loc, date)
        for key in Timeline.TIMES_TO_PUSH:
            # Pin timestamp = UTC midnight of `date` plus the fractional hours.
            yield Timeline.executor.submit(Timeline._push_time_pin, user, geoname_option, key, date, datetime.combine(date, time()).replace(tzinfo=pytz.utc) + timedelta(hours=times[key]))
    def _delete_pins_for_date(user, date):
        # Generator of futures: one pin delete per prayer for the given day.
        for key in Timeline.TIMES_TO_PUSH:
            yield Timeline.executor.submit(Timeline._delete_time_pin, user, key, date)
    def _delete_time_pin(user, prayer, date):
        # One HTTP session per worker thread, keyed by thread ident.
        session = Timeline.executor_http_sessions[threading.current_thread().ident]
        pin_id = "%s:%s:%s" % (user.user_token, date, prayer)
        res = session.delete("https://timeline-api.getpebble.com/v1/user/pins/%s" % pin_id,
                             headers={"X-User-Token": user.timeline_token, "Content-Type": "application/json"})
        if res.status_code == 410:
            # They've uninstalled the app
            user.timeline_token = None
            user.save()
        # NOTE(review): after a 410 the assert below still fires, surfacing
        # the failure to the caller — confirm this is intended.
        assert res.status_code == 200, "Pin delete failed %s %s" % (res, res.text)
        return True
    def _push_time_pin(user, geoname_option, prayer, date, timestamp):
        session = Timeline.executor_http_sessions[threading.current_thread().ident]
        pin_data = Timeline._generate_pin(user, geoname_option, prayer, date, timestamp)
        print(str(pin_data).encode("utf-8"))
        res = session.put("https://timeline-api.getpebble.com/v1/user/pins/%s" % pin_data["id"],
                          data=json.dumps(pin_data),
                          headers={"X-User-Token": user.timeline_token, "Content-Type": "application/json"})
        if res.status_code == 410:
            # They've uninstalled the app
            user.timeline_token = None
            user.save()
        assert res.status_code == 200, "Pin push failed %s %s" % (res, res.text)
        return True
    def _generate_pin(user, geoname_option, prayer, date, timestamp):
        # Build the timeline pin payload; pin id is stable per user/day/prayer
        # so re-pushes overwrite rather than duplicate.
        pin_id = "%s:%s:%s" % (user.user_token, date, prayer)
        prayer_name = Timeline.PRAYER_NAMES[user.config["prayer_names"]][prayer]
        geoname = (geoname_option if geoname_option else user.location_geoname)
        return {
            "id": pin_id,
            "time": timestamp.isoformat(),
            "layout": {
                "type": "genericPin",
                "title": prayer_name,
                "subtitle": "in %s" % geoname,
                "tinyIcon": "system://images/NOTIFICATION_FLAG"
            },
            "actions": [
                {
                    "title": "Qibla Compass",
                    "type": "openWatchApp",
                    "launchCode": 20
                }
            ],
            "reminders": [
                {
                    "time": timestamp.isoformat(),
                    "layout": {
                        "type": "genericReminder",
                        "title": prayer_name,
                        "locationName": "in %s" % geoname,
                        "tinyIcon": "system://images/NOTIFICATION_FLAG"
                    }
                }
            ]
        }
| {"/www.py": ["/models.py", "/timetable.py", "/timeline.py"], "/timetables/london_unified.py": ["/timetables/base.py"], "/timetables/singapore.py": ["/timetables/base.py"], "/timetable.py": ["/timetables/london_unified.py", "/timetables/singapore.py", "/models.py"], "/cron.py": ["/models.py", "/timeline.py"], "/timeline.py": ["/timetable.py"]} |
48,622 | ajwilson99/DL_Project1 | refs/heads/master | /neuralnet.py | import numpy as np
class Neuron:
    """A single neuron: a weight vector, a scalar bias, and a pluggable
    activation ("sigmoid", "relu", or "linear")."""
    # Initialize
    def __init__(self, num_inputs, learning_rate, activation, weights, bias):
        self.num_inputs = num_inputs
        self.learning_rate = learning_rate
        self.activation = activation
        self.weights = weights
        self.bias = bias
    # Activation function. Choices include
    #   - Sigmoid (Logistic)
    #   - ReLu
    #   - Linear
    def activate(self, input):
        """Apply the activation function phi to `input` (the net sum)."""
        if self.activation == "sigmoid":
            return 1 / (1 + np.exp(-input))
        elif self.activation == "relu":
            # BUG FIX: np.max(0, input) treated `input` as the *axis*
            # argument; element-wise ReLU needs np.maximum.
            return np.maximum(0, input)
        elif self.activation == "linear":
            return input
    def activate_derivative(self, input):
        """Return phi'(input) for the configured activation."""
        if self.activation == "sigmoid":
            return self.activate(input) * (1 - self.activate(input))
        elif self.activation == "relu":
            # Derivative of ReLU is the step function:
            # 1 where input > 0, 0 otherwise.
            return np.asarray(input > 0).astype(int)
        elif self.activation == "linear":
            return 1
    def calculate(self, inputs):
        """Feed `inputs` forward: out = phi(w . x + b).

        Side effect: caches phi'(net) in self.d_out_d_net for backprop.
        """
        net = np.dot(inputs, self.weights) + self.bias # net = w^T * x + b
        out = self.activate(net) # out = phi(net)
        # For use in backpropagation
        self.d_out_d_net = self.activate_derivative(net) # activation function derivative
        return out
class FullyConnectedLayer:
    """A dense layer built from independent Neuron objects.

    The layer draws one flat weight vector (num_neurons * num_inputs) and a
    bias vector; each neuron receives its own copied slice of the weights
    plus a scalar bias.
    """
    def __init__(self, num_neurons, activation, num_inputs, learning_rate):
        self.num_neurons = num_neurons
        self.activation = activation
        self.num_inputs = num_inputs
        self.learning_rate = learning_rate
        # Random initialization (same draw order as before: weights, then bias).
        self.weights = np.random.randn(num_neurons * num_inputs)
        self.bias = np.random.randn(num_neurons)
        # Build one Neuron per unit; fancy indexing hands each neuron an
        # independent copy of its slice of the layer weights.
        self.neurons = []
        for idx in range(self.num_neurons):
            picks = np.arange(idx * self.num_inputs, (idx + 1) * self.num_inputs)
            self.neurons.append(
                Neuron(self.num_inputs, self.learning_rate, self.activation,
                       self.weights[picks], self.bias[idx]))
    def calculate(self, inputs):
        """Feed `inputs` through every neuron; caches the per-neuron outputs
        in self.layer_out (used by backprop) and returns them."""
        self.layer_out = np.zeros(self.num_neurons)
        for idx, unit in enumerate(self.neurons):
            self.layer_out[idx] = unit.calculate(inputs)
        return self.layer_out
class NeuralNetwork:
    """A fully-connected feedforward network trained by per-sample SGD.

    `parameters` keys: 'num hidden layers', 'num neurons', 'num inputs',
    'num outputs', 'activations' (one per hidden layer plus one for the
    output layer), 'loss function' ('squared error' or
    'binary cross entropy'), 'learning rate'.
    """
    def __init__(self, parameters):
        self.num_hidden_layers = parameters['num hidden layers']
        if parameters['num hidden layers'] == 0:
            self.num_neurons = parameters['num inputs']
        else:
            self.num_neurons = parameters['num neurons']
        self.num_outputs = parameters['num outputs']
        # Make sure the number of chosen activation functions is equal to the number of hidden layers + 1
        # (for the output layer)
        assert(len(parameters['activations']) == (parameters['num hidden layers'] + 1))
        self.activations = parameters['activations']
        self.num_inputs = parameters['num inputs']
        self.loss_function = parameters['loss function']
        self.learning_rate = parameters['learning rate']
        # Initialize layers and weights
        self.layers = [None] * (self.num_hidden_layers + 1)
        # Create input and hidden layers
        for l in range(0, self.num_hidden_layers):
            # Input
            if l == 0:
                self.layers[l] = FullyConnectedLayer(self.num_neurons, self.activations[l], self.num_inputs, self.learning_rate)
            # Hidden
            else:
                self.layers[l] = FullyConnectedLayer(self.num_neurons, self.activations[l], self.num_neurons, self.learning_rate)
        # Output layer
        self.layers[-1] = FullyConnectedLayer(num_neurons = parameters['num outputs'], activation = self.activations[-1],
                                              num_inputs = self.num_neurons, learning_rate = self.learning_rate)
    # Feedforward
    def calculate(self, input):
        """Feed `input` through every layer and return the network output."""
        cur_inputs = input
        for layer in range(0, len(self.layers)):
            cur_outputs = self.layers[layer].calculate(cur_inputs) # Recursively compute the network outputs
            cur_inputs = cur_outputs
        return cur_outputs
    # Loss function calculation
    def calculateloss(self, desired_output, actual_output):
        """Compute the configured loss; also cached in self.loss."""
        if self.loss_function == "squared error":
            self.loss = (1 / 2) * np.sum((desired_output - actual_output)**2)
            return self.loss
        elif self.loss_function == "binary cross entropy":
            self.loss = -(np.sum(desired_output * np.log(actual_output) + (1 - desired_output) * np.log(1 - actual_output)))
            return self.loss
    def loss_derivative(self, desired_output, actual_output):
        """Derivative of the loss w.r.t. the output; cached in self.loss_deriv."""
        if self.loss_function == "squared error":
            self.loss_deriv = -np.sum(desired_output - actual_output)
            return self.loss_deriv
        elif self.loss_function == "binary cross entropy":
            self.loss_deriv = np.sum(-(desired_output / actual_output) + ((1 - desired_output)/(1 - actual_output)))
            return self.loss_deriv
    def update_weights(self, deltas, input):
        """Apply one SGD step to every neuron given the backprop deltas."""
        num_total_layers = len(self.layers)
        # Iterate through each layer
        for layer in range(0, num_total_layers):
            # Iterate through each neuron to update its input weights
            for neur in range(0, len(self.layers[layer].neurons)):
                if layer != (num_total_layers - 1): # If the current layer is NOT the output layer!
                    delta = deltas[neur, layer]
                else: # Else the current layer is the output layer
                    delta = self.out_deltas[neur]
                if layer == 0: # If the current layer is the first hidden layer
                    self.layers[layer].neurons[neur].weights -= (self.learning_rate * (delta * input)) # Weights
                    self.layers[layer].neurons[neur].bias -= (self.learning_rate * (delta * 1))
                else:
                    outs = self.layers[layer - 1].layer_out
                    self.layers[layer].neurons[neur].weights -= (self.learning_rate * (delta * outs))
                    # BUG FIX: this line previously decremented `weights` a
                    # second time; the unit-input gradient belongs to the bias.
                    self.layers[layer].neurons[neur].bias -= (self.learning_rate * (delta * 1))
    # One iteration of gradient descent
    def train(self, inputs, desired_outputs, epochs):
        """Train with per-sample SGD for `epochs` passes over `inputs`,
        recording each epoch's mean sample loss in self.loss_epoch."""
        self.inputs = inputs
        N = inputs.shape[0] # Number of training samples
        self.loss_epoch = np.zeros(epochs) # Initialize vector to contain loss values for each epoch
        for iter in range(0, epochs):
            self.losses = [None] * N # Vector containing losses for each training sample. Averaged at the end of each iteration of this loop
            out = np.zeros(desired_outputs.shape)
            # Compute each input through the network to get overall loss and update weights
            for i in range(0, len(inputs)):
                out[i] = self.calculate(inputs[i]) # Feedforward output for the ith input
                self.calculateloss(desired_outputs[i], out[i]) # Calculate loss for the ith input. store in self.loss
                self.losses[i] = self.loss
                self.out_deltas = np.zeros(len(self.layers[-1].neurons)) # Initialize vector for deltas in output neurons
                hidden_deltas = np.zeros([self.num_neurons, self.num_hidden_layers]) # "" matrix "" hidden-layer neurons
                # Output layer deltas
                for out_neuron in range(len(self.layers[-1].neurons)):
                    if len(desired_outputs.shape) > 1: # If the output consists of more than one neuron
                        self.out_deltas[out_neuron] = self.loss_derivative(desired_outputs[i, out_neuron], out[i, out_neuron]) * self.layers[-1].neurons[out_neuron].d_out_d_net
                    else:
                        self.out_deltas[out_neuron] = self.loss_derivative(desired_outputs[i], out[i]) * self.layers[-1].neurons[out_neuron].d_out_d_net
                # Hidden layer deltas
                for hidden_layer in range(self.num_hidden_layers-1, -1, -1): # Work backwards from output layer
                    for hidden_neuron in range(0, self.num_neurons): # Iterate through each neuron in the layer, starting from the "top"
                        # Derivative of activation function for this neuron
                        phi_prime = self.layers[hidden_layer].neurons[hidden_neuron].d_out_d_net
                        if hidden_layer == (self.num_hidden_layers - 1): # If we need the weights from the output layer, ...
                            out_weights = np.zeros(len(self.layers[-1].neurons))
                            for out_neuron in range(len(self.layers[-1].neurons)):
                                # Weights leaving each neuron in the final hidden layer are the "hidden_neuron"-th weight
                                # entering each output neuron. For example, for 3 hidden neurons and 2 output units, the "top"
                                # weight entering each of the output neurons will be used for the deltas in the first hidden unit.
                                # For the second hidden neuron, the "middle" weights entering each output neuron will be used
                                # for the deltas in the second hidden neuron, and so on.
                                out_weights[out_neuron] = self.layers[-1].neurons[out_neuron].weights[hidden_neuron]
                            hidden_deltas[hidden_neuron, hidden_layer] = phi_prime * np.dot(self.out_deltas, out_weights)
                        else:
                            prev_layer_deltas = hidden_deltas[:, hidden_layer + 1]
                            prev_layer_weights = np.zeros(len(self.layers[hidden_layer + 1].neurons))
                            for w in range(0, len(self.layers[hidden_layer + 1].neurons)):
                                prev_layer_weights[w] = self.layers[hidden_layer + 1].neurons[w].weights[hidden_neuron]
                            hidden_deltas[hidden_neuron, hidden_layer] = phi_prime * np.dot(prev_layer_weights, prev_layer_deltas)
                self.update_weights(hidden_deltas, inputs[i]) # Update weights
            self.loss_epoch[iter] = np.mean(self.losses) # Add loss value to vector
| {"/main.py": ["/neuralnet.py"]} |
48,623 | ajwilson99/DL_Project1 | refs/heads/master | /main.py | # COSC 525 Deep Learning - Project 1
# Aaron Wilson & Bohan Li
# Due Jan 28 2020
import numpy as np, sys
from neuralnet import NeuralNetwork
import matplotlib.pyplot as plt
def main():
    """Run one of three demos selected by argv[1]:

    - "example": the worked class example (fixed weights, one training step)
    - "and":     a single perceptron learning the AND gate
    - "xor":     single vs multi-layer perceptron on XOR (XOR is not
                 linearly separable, so only the multi-layer net converges)
    """
    script = str(sys.argv[1])
    epochs = 3000
    # script = 'example'
    if script == "example":
        x = np.array([[0.05, 0.1]])
        y = np.array([[0.01, 0.99]])
        desired_y = np.array([[0.773, 0.778]]) # After one training step (epoch)
        parameters = {
            "num inputs" : 2,
            "num outputs" : 2,
            "num hidden layers" : 1,
            "num neurons" : 2,
            "activations": ['sigmoid'] * 2,
            "learning rate": 0.5,
            "loss function": 'squared error'
        }
        nn = NeuralNetwork(parameters)
        # In this example, don't use randomly-initialized weights - use the ones from the example. So, to make things
        # simple, just over-write the existing, randomly-initialized weights from the NeuralNetwork instantiation.
        weights = np.array([[0.15, 0.20, 0.25, 0.30], [0.40, 0.45, 0.50, 0.55]])
        biases = np.array([[0.35, 0.35], [0.60, 0.60]])
        # NOTE(review): FullyConnectedLayer's attribute is `bias`, so `.biases`
        # below only adds an unused attribute; harmless in practice because the
        # per-neuron weights/biases are overwritten explicitly just after —
        # confirm intent.
        for l in range(0, len(nn.layers)):
            nn.layers[l].weights = weights[l]
            nn.layers[l].biases = biases[l]
        nn.layers[0].neurons[0].weights = weights[0, 0:2]
        nn.layers[0].neurons[1].weights = weights[0, 2::]
        nn.layers[0].neurons[0].bias = biases[0, 0]
        nn.layers[0].neurons[1].bias = biases[0, 1]
        nn.layers[1].neurons[0].weights = weights[1, 0:2]
        nn.layers[1].neurons[1].weights = weights[1, 2::]
        nn.layers[1].neurons[0].bias = biases[1, 0]
        nn.layers[1].neurons[1].bias = biases[1, 1]
        nn.train(x, y, epochs=1)
        new_output = nn.calculate(x)
        # Print to screen
        print('\n')
        print('Class example: single epoch')
        print('--------------------------------------------------------------------------------')
        print('Initial output is: {}. After one training step, the next output should be {}, (with sigmoid activation).'.format(y, desired_y))
        print('New output is... {}'.format(new_output))
    elif script == "and":
        x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y = np.array([0, 0, 0, 1])
        parameters = {
            "num inputs": 2,
            "num outputs": 1,
            "num hidden layers": 0,
            "num neurons": 0,
            "activations": ['sigmoid'],
            "learning rate": 0.1,
            "loss function": 'squared error'
        }
        nn = NeuralNetwork(parameters)
        nn.train(x, y, epochs=epochs)
        # Plotting
        plt.plot(nn.loss_epoch)
        plt.grid()
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('AND Neural Network, {} activation and \n {} loss'.format(parameters['activations'][0], parameters['loss function']))
        plt.show()
        # Printing to screen
        print('\n')
        print('AND Gate Perceptron:')
        print('--------------------------------------------------------------------------------')
        print('For y = 1, output should be >= 0.5. For y = 0, output should be < 0.5.')
        for input, output in zip(x, y):
            print("MULTI-LAYER PERCEPTRON: Output for input x = {} should be y = {}. Computed value: {}".format(input,
                output,
                nn.calculate(
                    input)))
    elif script == "xor":
        x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y = np.array([0, 1, 1, 0])
        parameters_perceptron = {
            "num inputs": 2,
            "num outputs": 1,
            "num hidden layers": 0,
            "num neurons": 0,
            "activations": ['sigmoid'],
            "learning rate": 0.1,
            "loss function": 'squared error'
        }
        parameters_multilayer_perceptron = {
            "num inputs": 2,
            "num outputs": 1,
            "num hidden layers": 1,
            "num neurons": 4,
            "activations": ['sigmoid'] * 2,
            "learning rate": 0.1,
            "loss function": 'squared error'
        }
        nn_single = NeuralNetwork(parameters_perceptron)
        nn_single.train(x, y, epochs=epochs)
        nn_multi = NeuralNetwork(parameters_multilayer_perceptron)
        nn_multi.train(x, y, epochs=epochs)
        # Plotting
        plt.plot(nn_single.loss_epoch, label='Single Perceptron')
        plt.ylabel('Loss')
        plt.plot(nn_multi.loss_epoch, label='Multi-layer Perceptron')
        plt.grid()
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(loc='upper right')
        plt.title('XOR Neural Network {} activation and \n {} loss'.format(
            parameters_multilayer_perceptron['activations'][0], parameters_multilayer_perceptron['loss function']))
        plt.show()
        # Printing results to screen
        print('\n')
        print('XOR Gate: Single-layer Perceptron:')
        print('--------------------------------------------------------------------------------')
        print('For y = 1, output should be >= 0.5. For y = 0, output should be < 0.5.')
        for input, output in zip(x, y):
            print("SINGLE PERCEPTRON: Output for input x = {} should be y = {}. Computed value: {}".format(input, output, nn_single.calculate(input)))
        print('\n')
        print('XOR Gate: Multi-layer Perceptron:')
        print('--------------------------------------------------------------------------------')
        print('For y = 1, output should be >= 0.5. For y = 0, output should be < 0.5.')
        for input, output in zip(x, y):
            print("MULTI-LAYER PERCEPTRON: Output for input x = {} should be y = {}. Computed value: {}".format(input, output,
                nn_multi.calculate(
                    input)))
if __name__ == "__main__":
    # CLI entry point: expects one argument ("example", "and", or "xor").
    main()
48,631 | liuhyzhy0909/FMSNLP | refs/heads/master | /TimeNormalizer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/20 16:39
# @Author : zhm
# @File : TimeNormalizer.py
# @Software: PyCharm
import pickle
import regex as re
import arrow
from StringPreHandler import StringPreHandler
from TimePoint import TimePoint
from TimeUnit import TimeUnit
# Main worker class for time-expression recognition.
class TimeNormalizer:
    """Main worker class for recognizing and normalizing Chinese time
    expressions in free text."""
    def __init__(self,isPreferFuture=True):
        # Prefer resolving ambiguous expressions to future dates when True.
        self.isPreferFuture = isPreferFuture
        #with open(path, 'rb') as f:
        # self.pattern = pickle.load(f,encoding='iso-8859-1')
        # NOTE(review): the pickle path is relative to the CWD, not to this
        # module — confirm callers always run from the project root.
        pkl_file = open('timedict_re.pkl', 'rb')
        time_str = pickle.load(pkl_file)
        pkl_file.close()
        self.pattern = re.compile(time_str)
    def parse(self, target):
        """
        Parse entry point; timeBase defaults to the current system time.
        :param target: string to analyse
        :return: stores the array of time units in self.timeToken
        """
        self.target = target
        self.timeBase = arrow.now().format('YYYY-M-D-H-m-s')
        self.oldTimeBase = self.timeBase
        self.__preHandling()
        self.timeToken = self.__timeEx()
    def __preHandling(self):
        """
        Pre-processing: strip whitespace and the modal particle, and convert
        Chinese numerals to Arabic digits.
        :return:
        """
        self.target = StringPreHandler.delKeyword(self.target, u"\\s+") # strip whitespace
        self.target = StringPreHandler.delKeyword(self.target, u"[的]+") # strip the modal particle
        self.target = StringPreHandler.numberTranslator(self.target) # Chinese numerals -> digits
    def __timeEx(self):
        """
        :param target: input text string (via self.target)
        :param timeBase: base reference time (via self.timeBase)
        :return: TimeUnit[] array of recognized time expressions
        """
        startline = -1
        endline = -1
        rpointer = 0
        temp = []
        match = self.pattern.finditer(self.target)
        for m in match:
            startline = m.start()
            #print(startline)
            # Adjacent matches are merged into one expression string.
            if startline == endline:
                rpointer -= 1
                temp[rpointer] = temp[rpointer] + m.group()
            else:
                a = m.group()
                temp.append(a)
            endline = m.end()
            rpointer += 1
        res = []
        # Time context: each recognized time becomes the context for the next,
        # so "Saturday 3pm to 5pm" resolves the trailing "5pm" to Saturday too.
        contextTp = TimePoint()
        for i in range(0, rpointer):
            res.append(TimeUnit(temp[i], self, contextTp))
            contextTp = res[i].tp
        res = self.__filterTimeUnit(res)
        return res
    def __filterTimeUnit(self, tu_arr):
        """
        Drop useless tokens — ones whose recognized time is
        1970-01-01 00:00:00 (fastTime == 0).
        :param tu_arr:
        :return:
        """
        if (tu_arr is None) or (len(tu_arr) < 1):
            return tu_arr
        res = []
        for tu in tu_arr:
            if tu.time.timestamp != 0:
                res.append(tu)
        return res
| {"/TimeParse.py": ["/TimeNormalizer.py"], "/NLPForFMS.py": ["/MongoForFMS.py", "/TimeParse.py"], "/Test.py": ["/NLPForFMS.py"]} |
48,632 | liuhyzhy0909/FMSNLP | refs/heads/master | /TimeParse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/22 10:21
# @Author : zhm
# @File : Test.py
# @Software: PyCharm
import importlib,sys
import regex as re
import flask
import arrow
from TimeNormalizer import TimeNormalizer
import pickle
importlib.reload(sys)
#sys.setdefaultencoding('utf-8')
class TimeParse():
    """Parse Chinese time expressions into normalized dates / date ranges.

    NOTE: throughout this project the methods are called unbound, passing the
    class itself as ``self`` (e.g. ``TimeParse.getTime(TimeParse, text)``).
    Signatures are kept unchanged for compatibility with those callers.
    """

    def findTime(self, input_str):
        """Resolve a time phrase with TimeNormalizer.

        :param input_str: time phrase
        :return: list holding the first normalized date as 'YYYY-MM-DD'
            (empty when nothing is recognized)
        """
        tn = TimeNormalizer(isPreferFuture=True)
        t = []
        tn.parse(input_str)
        if (len(tn.timeToken) != 0):
            t.append(tn.timeToken[0].time.format("YYYY-MM-DD"))
        return t

    def findGsize(self, input_str):
        """Detect the time granularity of *input_str*.

        :return: one of '日' (day), '月' (month), '季' (quarter),
            '半年' (half year), '年' (year), 'INM' ("N months ago" interval)
            or '' when no granularity keyword is found.
        """
        # Day-granularity markers.
        Day = u"天|日|号"
        Day_list = [m.group() for m in re.compile(Day).finditer(input_str)]
        # "N个月前" / "前N个月" style month intervals.
        Interval_month = u"(\\d+(?=(个)?月[以之]?前))|(前\\d+(?=(个)?月[以之]?))"
        Interval_month_list = [m.group() for m in
                               re.compile(Interval_month).finditer(input_str)]
        Day_sign = '日'
        Month = u"月"
        Season = u"季"
        HalfYear = u"半年"
        Year = u"年"
        Interval_month_sign = 'INM'
        Gsize = ''
        # BUGFIX: the original tested input_str.find(Day) where Day is a regex
        # alternation ("天|日|号") -- a literal substring that can never occur --
        # and used `> 0`, which missed a keyword sitting at index 0.  The
        # elif-chain order already guarantees the exclusions the original
        # spelled out explicitly.
        if len(Interval_month_list) > 0:
            Gsize = Gsize + Interval_month_sign
        elif len(Day_list) > 0:
            Gsize = Gsize + Day_sign
        elif input_str.find(Month) >= 0:
            Gsize = Gsize + Month
        elif input_str.find(Season) >= 0:
            Gsize = Gsize + Season
        elif input_str.find(HalfYear) >= 0:
            Gsize = Gsize + HalfYear
        elif input_str.find(Year) >= 0:
            Gsize = Gsize + Year
        return Gsize

    def getEndTime(self, timestr, grandsize):
        """Expand a start date *timestr* into a period string according to the
        granularity *grandsize*.

        :return: 'begin YYYY-MM end YYYY-MM' for year / half-year / quarter /
            month-interval granularities, 'YYYY-MM' for a month,
            'YYYY-MM-DD' for a day, '' otherwise.
        """
        Month = u"月"
        Season = u"季"
        HalfYear = u"半年"
        Year = u"年"
        Day = u"日"
        Interval_month_sign = 'INM'
        EndTime = ''
        timeformat = "YYYY-MM"
        if grandsize == Year:
            start = arrow.get(timestr)
            end = start.shift(months=+11)  # whole year: start + 11 months
            EndTime = 'begin ' + start.format(timeformat) + ' end ' + end.format(timeformat)
        elif grandsize == HalfYear:
            start = arrow.get(timestr)
            end = start.shift(months=+5)
            EndTime = 'begin ' + start.format(timeformat) + ' end ' + end.format(timeformat)
        elif grandsize == Season:
            start = arrow.get(timestr)
            end = start.shift(months=+2)
            EndTime = 'begin ' + start.format(timeformat) + ' end ' + end.format(timeformat)
        elif grandsize == Interval_month_sign:
            start = arrow.get(timestr)
            end = arrow.utcnow()  # the interval runs up to "now" (UTC)
            EndTime = 'begin ' + start.format(timeformat) + ' end ' + end.format(timeformat)
        elif grandsize == Month:
            EndTime = arrow.get(timestr).format(timeformat)
        elif grandsize == Day:
            EndTime = arrow.get(timestr).format("YYYY-MM-DD")
        return EndTime

    def getTime(self, string):
        """Extract every time expression from *string* and expand each into
        its period string (see :meth:`getEndTime`).

        :param string: input sentence
        :return: list of period strings
        """
        # Fix: load the pickled regex with a context manager (no leaked fd).
        with open('timedict_re.pkl', 'rb') as pkl_file:
            time_str = pickle.load(pkl_file)
        pattern = re.compile(time_str)
        # Quarters ("季度") are not fully supported yet.
        time_list = []
        for m in pattern.finditer(string):
            # Unbound-call convention: the class itself is passed as `self`.
            Gsize = self.findGsize(TimeParse, m.group())
            Time = self.findTime(TimeParse, m.group())
            for t in Time:
                endT = self.getEndTime(TimeParse, t, Gsize)
                time_list.append(endT)
        return time_list
if __name__ == "__main__":
    # Manual smoke test: load the pre-built time regex and run the pipeline
    # over a few sample sentences.
    pkl_file = open('timedict_re.pkl', 'rb')
    time_str = pickle.load(pkl_file)
    pkl_file.close()
    pattern = re.compile(time_str)
    # Quarters ("季度") are not supported yet.
    string = u'中心医院前4个月住院收入'
    #。全院六七八月份手术量是多少。全院6、7、8月份检查人数是多少。心内科前6个月门诊人次。肾内科3月、6月门诊人次。中医肾内科34月份药占比
    #string = u'肾内科3月、6月门诊人次。朔州中心医院去年上半年门诊量。朔州中心医院上一年总收入是多少。2015年3月4日门诊量。今年8月9月。今年3月门诊量。前天门诊量。本月平均住院日。这个月。今年下半年。当前季度。本季度。下个季度。上季度。第一季度平均住院日。第2季度床位使用率。上个月。当前月。上半年。1月。12月。一月。前年一月份。去年十一月份。后年12月份。2018年六月'
    # Split the sample text into sentences at the Chinese full stop.
    string = string.split('。')
    string = [x for x in string if len(x)!=0]
    for s in string:
        time_list = []
        '''
        rule = u'[0-9]{4}(年|\\.|\\-)((01)|(02)|(03)|(04)|(05)|(06)|(07)|(08)|(09)|(10)|(11)|(12)|([1-9]))月'
        pattern = re.compile(rule)
        s = u'2017年06月内一科门诊量'
        '''
        for m in pattern.finditer(s):
            #ss = m.group()
            print(m.group())
            # NOTE: the class itself is passed as `self` (unbound-call style).
            Gsize = TimeParse.findGsize(TimeParse,m.group())
            #print(Gsize)
            Time = TimeParse.findTime(TimeParse,m.group())
            #print(Time,Gsize)
            for t in Time:
                endT = TimeParse.getEndTime(TimeParse,t, Gsize)
                time_list.append(endT)
        # findGsize looks for granularity keywords inside the matched time
        # string (so that keywords outside time expressions are ignored).
        print(time_list)
        print("------------------------")
| {"/TimeParse.py": ["/TimeNormalizer.py"], "/NLPForFMS.py": ["/MongoForFMS.py", "/TimeParse.py"], "/Test.py": ["/NLPForFMS.py"]} |
48,633 | liuhyzhy0909/FMSNLP | refs/heads/master | /NLP.py | '''
n/名词 np/人名 ns/地名 ni/机构名 nz/其它专名
m/数词 q/量词 mq/数量词 t/时间词 f/方位词 s/处所词
v/动词 a/形容词 d/副词 h/前接成分 k/后接成分 i/习语
j/简称 r/代词 c/连词 p/介词 u/助词 y/语气助词
e/叹词 o/拟声词 g/语素 w/标点 x/其它 uw/自定义
'''
# -*- coding: utf-8 -*-
import thulac
# Initialize THULAC with a custom medical user dictionary
# (default mode: segmentation + POS tagging).
thu1 = thulac.thulac(user_dict='THUOCL_medical_1.txt') # default mode
'''
user_dict='C://UserDict//THUOCL_medical.txt'
thulac(user_dict=None, model_path=None, T2S=False, seg_only=False, filt=False)初始化程序,进行自定义设置
user_dict 设置用户词典,用户词典中的词会被打上uw标签。词典中每一个词一行,UTF8编码
T2S 默认False, 是否将句子从繁体转化为简体
seg_only 默认False, 时候只进行分词,不进行词性标注
filt 默认False, 是否使用过滤器去除一些没有意义的词语,例如“可以”。
model_path 设置模型文件所在文件夹,默认为models/
'''
#text = thu1.cut("朔州中心医院最近三个月电子耳蜗植入术数量", text=True) #进行一句话分词
# Segment one sample sentence and print the tagged result.
text = thu1.cut("精神科本季度住院人数", text=True)
print(text)
'''
#某项指标在数据库里面没有
#缺省值:全院、近一年
'''
48,634 | liuhyzhy0909/FMSNLP | refs/heads/master | /NLPForFMS.py | # -*- coding: utf-8 -*-
import thulac
from MongoForFMS import MyMongoDB
import importlib,sys
import regex as re
from TimeParse import TimeParse
import pickle
importlib.reload(sys)
#sys.setdefaultencoding('utf-8')
class SelectForFMS(object):
    #selectTest = '2017年6月全院门诊量最高的科室'  # sample input text
    '''
    Turn a natural-language hospital query into a structured filter dict,
    for example:

    {
    "keyword":
        {	"name":"门诊量",
            "type":"max"
        },
    "dimension":
        {
            "t":{["6月","7月","8月"]},
            "dep":{["内一科"]}
        }
    }
    '''
    user_dict = '1_updata.txt'# user dictionary for the segmenter
    T2S = ''# whether to convert traditional to simplified Chinese
    seg_only = ''# whether to do segmentation only (no POS tagging)
    filt = ''# filter for meaningless words
    model_path = ''# model directory
    model = ''
    def fenci(self,js):# word segmentation
        # js is the request payload; js["NLPstring"] holds the query text.
        print(js)
        jsondict = {"dimension": {}, "keyword": {}} # the JSON dict returned
        selectTest = js["NLPstring"]
        thu1 = thulac.thulac(user_dict = self.user_dict , seg_only = False) # default mode
        text = thu1.cut(selectTest, text = True)
        TimeList = [] # time list (currently unused; time comes from TimeParse below)
        DepartList = ["全院"] # department list; defaults to the whole hospital
        mongo = MyMongoDB()
        # thulac returns "word_tag word_tag ..."; split into word/tag pairs.
        texts = str(text).split(' ')
        for i in texts:
            tmps = str(i).split("_")
            # Look the word up in Mongo; fall back to the POS tag if unknown.
            value = mongo.dbfind({"name": tmps[0]})
            if (value != None):
                print(tmps[0] + "_" + value)
            else:
                value = tmps[1]
                print(tmps[0] + "_" + value)
            # The stored value encodes where the word belongs, e.g.
            # "department", "keyword_name", "keyword_type_max".
            dimens = str(value).split('_')
            if(len(dimens)==1):
                # Add to the time list
                '''
                if (value == 't'):
                    TimeList.append(tmps[0])
                '''
                if (value == 'department'):
                    # A concrete department replaces the "whole hospital" default.
                    if "全院" in DepartList:
                        DepartList.remove("全院")
                    DepartList.append(tmps[0])
            if (len(dimens) == 2):
                jsondict[dimens[0]][dimens[1]] = tmps[0]
            if (len(dimens) == 3):
                jsondict[dimens[0]][dimens[1]] = [dimens[2]]
        '''
        Extract the time expressions
        '''
        pkl_file = open('timedict_re.pkl', 'rb')
        time_str = pickle.load(pkl_file)
        pkl_file.close()
        pattern = re.compile(time_str)
        Time = []
        for m in pattern.finditer(selectTest):
            print(m.group())
            # NOTE: unbound-call convention -- the class is passed as `self`.
            Gsize = TimeParse.findGsize(TimeParse,m.group())
            Time.append(TimeParse.getTime(TimeParse,m.group()))
        # Flatten the list of per-match period lists.
        Time = sum(Time,[])
        # findGsize looks for granularity keywords inside the matched time string
        #print(Time)
        jsondict["dimension"]["t"] = Time #+ Gsize
        jsondict["dimension"]["dep"] = DepartList
        print(jsondict)
        return jsondict
'''
user_dict='C://UserDict//THUOCL_medical.txt'
thulac(user_dict=None, model_path=None, T2S=False, seg_only=False, filt=False)初始化程序,进行自定义设置
user_dict 设置用户词典,用户词典中的词会被打上uw标签。词典中每一个词一行,UTF8编码
T2S 默认False, 是否将句子从繁体转化为简体
seg_only 默认False, 时候只进行分词,不进行词性标注
filt 默认False, 是否使用过滤器去除一些没有意义的词语,例如“可以”。
model_path 设置模型文件所在文件夹,默认为models/
'''
#print(text)
if __name__ == '__main__':
    # Sample request payload: id + the natural-language query to parse.
    js = {
        "id":1,
        "NLPstring":"中医科、肾内科3月份药占比"
    }
    #s = "6月份门诊量最高的科室"
    # NOTE: the class itself is passed as `self` (unbound-call convention).
    SelectForFMS.fenci(SelectForFMS,js)
| {"/TimeParse.py": ["/TimeNormalizer.py"], "/NLPForFMS.py": ["/MongoForFMS.py", "/TimeParse.py"], "/Test.py": ["/NLPForFMS.py"]} |
48,635 | liuhyzhy0909/FMSNLP | refs/heads/master | /Test.py | from flask import Flask, jsonify, abort, request
from NLPForFMS import SelectForFMS
app = Flask(__name__)
# In-memory sample data (the /NLP endpoint does not actually use it).
tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]
@app.route('/NLP',methods=['POST'])
def NLP():
    """POST /NLP: run the NLP pipeline on the JSON payload and return the
    structured result with HTTP 201."""
    if not request.json:
        abort(400)
    # Unbound-call convention: the class itself is passed as `self`.
    parsed = SelectForFMS.fenci(SelectForFMS, request.json)
    return jsonify({'task': parsed}), 201
@app.route('/<string:name>', methods=['GET'])
def hello(name):
    """GET /<name>: simple greeting endpoint."""
    return f"hello {name} !"
if __name__ == '__main__':
    #app.run()
    # Listen on all interfaces so the service is reachable from other hosts.
    # NOTE(review): 0.0.0.0 with the Flask dev server exposes the service;
    # confirm this is only used in a trusted network.
    app.run(host='0.0.0.0',port=5000)
| {"/TimeParse.py": ["/TimeNormalizer.py"], "/NLPForFMS.py": ["/MongoForFMS.py", "/TimeParse.py"], "/Test.py": ["/NLPForFMS.py"]} |
48,636 | liuhyzhy0909/FMSNLP | refs/heads/master | /MongoForFMS.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from pymongo import MongoClient
import regex as re
# MongoDB connection settings.
settings = {
    "ip":'localhost',	#ip
    "port":27017,           #port
    "db_name" : "mydb",  #database name
    "set_name" : "test_set"   #collection name
}
class MyMongoDB(object):
    """Thin wrapper around a single MongoDB collection (see ``settings``)."""

    def __init__(self):
        try:
            self.conn = MongoClient(settings["ip"], settings["port"])
        except Exception as e:
            print(e)
        self.db = self.conn[settings["db_name"]]
        self.my_set = self.db[settings["set_name"]]

    def insert(self, dic):
        """Insert one document.

        NOTE(review): Collection.insert() was removed in pymongo>=4;
        migrate to insert_one() when upgrading the driver.
        """
        print("insert...")  # BUGFIX: log message typo ("inser...")
        self.my_set.insert(dic)

    def update(self, dic, newdic):
        """Update documents matching *dic* with *newdic* (legacy update())."""
        print("update...")
        self.my_set.update(dic, newdic)

    def delete(self, dic):
        """Remove documents matching *dic* (legacy remove())."""
        print("delete...")
        self.my_set.remove(dic)

    def dbfind(self, dic):
        """Return the "value" field of the first matching document, or None
        when there is no match."""
        data = self.my_set.find(dic)
        for result in data:
            return result["value"]
        return None
def main():
    """Bulk-load the name -> value dictionary file into MongoDB.

    Each line of ``1_updata.txt`` is ``<name> <whitespace> <value>``.
    """
    mongo = MyMongoDB()
    #dic = {"name": "药占比", "value": "keyword_name"}
    #mongo.insert(dic)
    # Fix: use a context manager so the file is always closed.
    with open("1_updata.txt", encoding="utf-8") as f:
        for line in f:
            lines = re.split(r'\s+', str(line).strip())
            tname = lines[0]
            tvalue = lines[1]
            #print("name:"+ tname + "value:" + tvalue)
            dic = {"name": tname, "value": tvalue}
            mongo.insert(dic)
if __name__ == "__main__":
    # One-off loader: populate the Mongo collection from the dictionary file.
    main()
'''
#门诊量 keyword_name
#全院 department
#内一科 department
#最高 keyword_type_max
#最多 keyword_type_max
''' | {"/TimeParse.py": ["/TimeNormalizer.py"], "/NLPForFMS.py": ["/MongoForFMS.py", "/TimeParse.py"], "/Test.py": ["/NLPForFMS.py"]} |
48,652 | JulieNeuro/nitk | refs/heads/master | /nitk/image/img_brain_mask.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
epilog = """
Created on Wed Feb 5 15:29:39 2020
@author: edouard.duchesnay@cea.fr
Compute brain mask
Example:
python nitk/image/img_brain_mask.py --input /neurospin/psy/start-icaar-eugei/derivatives/cat12/vbm/sub-*/ses-*/mri/mwp1sub*.nii
"""
import argparse

import numpy as np
import scipy
import scipy.ndimage  # explicit: `import scipy` alone does not guarantee the submodule
import pandas as pd
import nibabel
import nilearn
import nilearn.masking

from nitk.bids import get_keys
from nitk.image import img_to_array
def compute_brain_mask(imgs, target_img=None, mask_thres_mean=0.1, mask_thres_std=1e-6, clust_size_thres=10, verbose=1):
    """Compute a brain mask.

    (1) Implicit mask: ``mean >= mask_thres_mean`` and ``std >= mask_thres_std``.
    (2) Brain mask from ``nilearn.masking.compute_gray_matter_mask(target_img)``.
    (3) mask = implicit mask & brain mask.
    (4) Remove small branches with ``scipy.ndimage.binary_opening``.
    (5) Remove isolated clusters of connected voxels smaller than
        ``clust_size_thres``.

    Parameters
    ----------
    imgs : [str] or ndarray
        Paths to images, or an array of shape
        (n_subjects, 1, image_axis0, image_axis1, ...); in the array case
        ``target_img`` must be provided.
    target_img : nii image
        Image defining the referential (required when ``imgs`` is an array).
    mask_thres_mean : float (default 0.1)
        Implicit mask threshold ``mean >= mask_thres_mean``.
    mask_thres_std : float (default 1e-6)
        Implicit mask threshold ``std >= mask_thres_std``.
    clust_size_thres : int (default 10)
        Remove clusters (of connected voxels) smaller than this.
    verbose : int (default 1)
        Verbosity level.

    Returns
    -------
    nii image
        Mask in the referential of ``target_img`` (or of the first image).

    Raises
    ------
    TypeError
        If ``imgs`` is neither a non-empty list of filenames nor a >=5D array.

    Example
    -------
    >>> from nitk.image import compute_brain_mask
    >>> import glob
    >>> imgs = glob.glob("/neurospin/psy/start-icaar-eugei/derivatives/cat12/vbm/sub-*/ses-*/mri/mwp1sub*.nii")
    >>> mask_img = compute_brain_mask(imgs)
    Clusters of connected voxels #3, sizes= [368569, 45, 19]
    >>> mask_img.to_filename("/tmp/mask.nii")
    """
    if isinstance(imgs, list) and len(imgs) >= 1 and isinstance(imgs[0], str):
        imgs_arr, df, target_img = img_to_array(imgs)
    elif isinstance(imgs, np.ndarray) and imgs.ndim >= 5:
        imgs_arr = imgs
        assert isinstance(target_img, nibabel.nifti1.Nifti1Image)
    else:
        # BUGFIX: previously fell through with `imgs_arr` undefined, raising
        # an obscure NameError below.
        raise TypeError("imgs must be a non-empty list of filenames or an "
                        "array of shape (n_subjects, 1, ...)")
    # (1) Implicit mask.
    mask_arr = np.ones(imgs_arr.shape[1:], dtype=bool).squeeze()
    if mask_thres_mean is not None:
        mask_arr = mask_arr & (np.abs(np.mean(imgs_arr, axis=0)) >= mask_thres_mean).squeeze()
    if mask_thres_std is not None:
        mask_arr = mask_arr & (np.std(imgs_arr, axis=0) >= mask_thres_std).squeeze()
    # (2) Brain mask: gray-matter mask computed by resampling the MNI152
    # template onto the target image (in practice a whole-brain mask).
    mask_img = nilearn.masking.compute_gray_matter_mask(target_img)
    # (3) mask = implicit mask & brain mask.
    mask_arr = (mask_img.get_fdata() == 1) & mask_arr
    # (4) Remove small branches.
    mask_arr = scipy.ndimage.binary_opening(mask_arr)
    # (5) Remove isolated clusters smaller than clust_size_thres.
    mask_clustlabels_arr, n_clusts = scipy.ndimage.label(mask_arr)
    labels = np.unique(mask_clustlabels_arr)[1:]
    for lab in labels:
        clust_size = np.sum(mask_clustlabels_arr == lab)
        if clust_size <= clust_size_thres:
            mask_arr[mask_clustlabels_arr == lab] = False
    if verbose >= 1:
        mask_clustlabels_arr, n_clusts = scipy.ndimage.label(mask_arr)
        labels = np.unique(mask_clustlabels_arr)[1:]
        print("Clusters of connected voxels #%i, sizes=" % len(labels),
              [np.sum(mask_clustlabels_arr == lab) for lab in labels])
    return nilearn.image.new_img_like(target_img, mask_arr)
if __name__ == "__main__":
    # CLI wrapper: compute a brain mask from a list of niftii images.
    parser = argparse.ArgumentParser(epilog=epilog)
    parser.add_argument('--input', help='list of niftii images', nargs='+', type=str)
    parser.add_argument('-o', '--output', help='niftii file for the mask', type=str)
    options = parser.parse_args()
    # TODO extends with additional parameters
    if options.input is None:
        parser.print_help()
        raise SystemExit("Error: Input is missing.")
    if options.output is None:
        # Default output filename in the current directory.
        options.output = "mask.nii.gz"
    mask_img = compute_brain_mask(options.input)
    mask_img.to_filename(options.output)
| {"/nitk/image/img_brain_mask.py": ["/nitk/image/__init__.py"], "/nitk/image/__init__.py": ["/nitk/image/img_to_array.py", "/nitk/image/img_brain_mask.py"]} |
48,653 | JulieNeuro/nitk | refs/heads/master | /nitk/image/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 11:12:53 2020
@author: edouard.duchesnay@cea.fr
"""
from .img_to_array import img_to_array
from .img_brain_mask import compute_brain_mask
from .img_global_operations import global_scaling, center_by_site
__all__ = ['img_to_array',
'compute_brain_mask',
'global_scaling',
'center_by_site']
| {"/nitk/image/img_brain_mask.py": ["/nitk/image/__init__.py"], "/nitk/image/__init__.py": ["/nitk/image/img_to_array.py", "/nitk/image/img_brain_mask.py"]} |
48,654 | JulieNeuro/nitk | refs/heads/master | /nitk/image/img_to_array.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 16:45:42 2020
@author: edouard.duchesnay@cea.fr
Load images assuming paths contain a BIDS pattern to retrieve participant_id such sub-<participant_id>
"""
import numpy as np
import pandas as pd
import nibabel
import argparse
from nitk.bids import get_keys
def img_to_array(img_filenames, check_same_referential=True, expected=None):
    """Convert nii images to an array (n_subjects, 1, image_axis0, ...).

    Assumes BIDS organisation of files to retrieve participant_id and session.

    Parameters
    ----------
    img_filenames : [str]
        Paths to images.
    check_same_referential : bool
        If True (default), check that all images share the same referential.
    expected : dict or None
        Optional parameters to check, e.g.
        ``dict(shape=(121, 145, 121), zooms=(1.5, 1.5, 1.5))``.
        (Fix: was a mutable default argument ``dict()``.)

    Returns
    -------
    imgs_arr : array (n_subjects, 1, image_axis0, image_axis1, ...)
        Data structured as (n_subjects, n_channels, image_axis0, ...).
    df : DataFrame
        With columns 'participant_id', 'session', 'path'.
    ref_img : nii image
        First image; keeps the referential and all image metadata.

    Example
    -------
    >>> from nitk.image import img_to_array
    >>> import glob
    >>> img_filenames = glob.glob("/neurospin/psy/start-icaar-eugei/derivatives/cat12/vbm/sub-*/ses-*/mri/mwp1sub*.nii")
    >>> imgs_arr, df, ref_img = img_to_array(img_filenames)
    >>> print(imgs_arr.shape)
    (171, 1, 121, 145, 121)
    >>> print(df.shape)
    (171, 3)
    """
    expected = {} if expected is None else expected
    df = pd.DataFrame([pd.Series(get_keys(filename)) for filename in img_filenames])
    imgs_nii = [nibabel.load(filename) for filename in df.path]
    ref_img = imgs_nii[0]
    # Check expected dimensions against the reference image.
    if 'shape' in expected:
        assert ref_img.get_fdata().shape == expected['shape']
    if 'zooms' in expected:
        assert ref_img.header.get_zooms() == expected['zooms']
    if check_same_referential:  # all images must share affine and shape
        assert np.all([np.all(img.affine == ref_img.affine) for img in imgs_nii])
        assert np.all([np.all(img.get_fdata().shape == ref_img.get_fdata().shape) for img in imgs_nii])
    # Stack as subjects x channels (1) x image.
    imgs_arr = np.stack([np.expand_dims(img.get_fdata(), axis=0) for img in imgs_nii])
    return imgs_arr, df, ref_img
if __name__ == "__main__":
    # CLI wrapper: dump images to a raw array + participants csv + ref image.
    parser = argparse.ArgumentParser(epilog=img_to_array.__doc__.split('\n')[1].strip())
    parser.add_argument('--input', help='list of niftii images', nargs='+', required=True, type=str)
    parser.add_argument('-o', '--output', help='output prefix for csv file', type=str)
    options = parser.parse_args()
    # NOTE(review): dead check -- argparse already enforces required=True.
    if options.input is None:
        parser.print_help()
        raise SystemExit("Error: Input is missing.")
    if options.output is None:
        options.output = "imgs"
    imgs_arr, df, ref_img = img_to_array(options.input, check_same_referential=True, expected=dict())
    # Raw float64 dump; reload with np.fromfile + reshape from the csv/ref.
    imgs_arr.tofile(options.output + "_data64.npy")
    df.to_csv(options.output + "_participants.csv", index=False )
    ref_img.to_filename(options.output + "_imgref.nii.gz")
| {"/nitk/image/img_brain_mask.py": ["/nitk/image/__init__.py"], "/nitk/image/__init__.py": ["/nitk/image/img_to_array.py", "/nitk/image/img_brain_mask.py"]} |
48,655 | JulieNeuro/nitk | refs/heads/master | /nitk/stats/stats_residualizer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 15:15:14 2020
@author: edouard.duchesnay@cea.fr
"""
import numpy as np
import mulm
class Residualizer:
    """Residualize dependent variables Y on nuisance terms, optionally
    adjusting for covariates.

    Given ``formula_res`` (e.g. ``"site"``) and ``formula_full``
    (e.g. ``"site + age + sex + diagnosis"``), ``fit`` estimates the full
    linear model ``Y = X b + eps`` and ``transform`` subtracts only the
    contribution of the residualization terms::

        Y - X[:, res] @ b[res]

    Typical use::

        res = Residualizer(df, formula_res="site", formula_full="age + site")
        Yres = res.fit_transform(Y, res.get_design_mat())

    ``fit`` and ``transform`` may receive different rows (e.g. train/test
    folds of a cross-validation) as long as the design-matrix columns match.
    """

    def __init__(self, data, formula_res, formula_full=None):
        if formula_full is None:
            formula_full = formula_res
        # Terms belonging to the residualization sub-formula.
        res_terms = mulm.design_matrix(formula=formula_res, data=data)[1].keys()
        self.design_mat, self.t_contrasts, self.f_contrasts = \
            mulm.design_matrix(formula=formula_full, data=data)
        # Boolean mask of the full-model columns to remove at transform time.
        contrasts = [cont for term, cont in self.t_contrasts.items()
                     if term in res_terms]
        self.mask = np.array(contrasts).sum(axis=0) == 1

    def get_design_mat(self):
        """Return the (n, k) design matrix built from the full formula."""
        return self.design_mat

    def fit(self, Y, design_mat):
        """Estimate the full model coefficients.

        Y: array (n, p) of dependent variables;
        design_mat: array (n, k) of independent variables.
        """
        assert Y.shape[0] == design_mat.shape[0]
        assert self.mask.shape[0] == design_mat.shape[1]
        self.mod_mulm = mulm.MUOLS(Y, design_mat).fit()
        return self

    def transform(self, Y, design_mat):
        """Subtract the fitted contribution of the residualization terms."""
        assert Y.shape[0] == design_mat.shape[0]
        assert self.mask.shape[0] == design_mat.shape[1]
        correction = np.dot(design_mat[:, self.mask],
                            self.mod_mulm.coef[self.mask, :])
        return Y - correction

    def fit_transform(self, Y, design_mat):
        """Convenience: ``fit`` then ``transform`` on the same data."""
        return self.fit(Y, design_mat).transform(Y, design_mat)
def residualize(Y, data, formula_res, formula_full=None):
    """Functional one-shot wrapper around :class:`Residualizer`."""
    residualizer = Residualizer(data=data, formula_res=formula_res,
                                formula_full=formula_full)
    design = residualizer.get_design_mat()
    return residualizer.fit_transform(Y, design)
if __name__ == '__main__':
    # Demo: compare simple vs covariate-adjusted site residualization.
    import numpy as np
    import pandas as pd
    import scipy.stats as stats
    from nitk.stats import Residualizer
    import seaborn as sns
    np.random.seed(1)
    # Dataset with site effect on age
    site = np.array([-1] * 50 + [1] * 50)
    age = np.random.uniform(10, 40, size=100) + 5 * site
    y = -0.1 * age + site + np.random.normal(size=100)
    data = pd.DataFrame(dict(y=y, age=age, site=site.astype(object)))
    # Simple residualization on site
    res_spl = Residualizer(data, formula_res="site")
    yres = res_spl.fit_transform(y[:, None], res_spl.get_design_mat())
    # Site residualization adjusted for age
    res_adj = Residualizer(data, formula_res="site", formula_full="age + site")
    yadj = res_adj.fit_transform(y[:, None], res_adj.get_design_mat())
    # Site residualization adjusted for age provides higher correlation, and
    # lower stderr than simple residualization
    lm_res = stats.linregress(age, yres.ravel())
    lm_adj = stats.linregress(age, yadj.ravel())
    # NOTE(review): the results of these np.allclose calls are discarded;
    # they should probably be asserted.
    np.allclose((lm_res.slope, lm_res.rvalue, lm_res.stderr),
                (-0.079187578, -0.623733003, 0.0100242219))
    np.allclose((lm_adj.slope, lm_adj.rvalue, lm_adj.stderr),
                (-0.110779913, -0.7909219758, 0.00865778640))
    # Plot
    data["yres"] = yres
    data["yadj"] = yadj
    sns.lmplot("age", "y", hue="site", data=data)
    sns.lmplot("age", "yres", hue="site", data=data)
    sns.lmplot("age", "yadj", hue="site", data=data)
| {"/nitk/image/img_brain_mask.py": ["/nitk/image/__init__.py"], "/nitk/image/__init__.py": ["/nitk/image/img_to_array.py", "/nitk/image/img_brain_mask.py"]} |
48,656 | JulieNeuro/nitk | refs/heads/master | /nitk/utils/array_utils.py | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 28 17:39:34 2014
@author: edouard.duchesnay@cea.fr
"""
import numpy as np
def arr_get_threshold_from_norm2_ratio(v, ratio=.99):
    """Return the threshold t such that keeping only the components with
    ``np.abs(v) >= t`` retains (approximately, without exceeding) a fraction
    *ratio* of the l2 norm of *v*:

        ||v[np.abs(v) >= t]|| / ||v|| <= ratio  (largest such t)

    Returns 0 when even the largest component exceeds the target energy.

    Example
    -------
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> v = rng.randn(10 ** 5)
    >>> t = arr_get_threshold_from_norm2_ratio(v, ratio=.5)
    >>> v_t = v.copy()
    >>> v_t[np.abs(v) < t] = 0
    >>> r = np.sqrt(np.sum(v_t ** 2)) / np.sqrt(np.sum(v ** 2))
    >>> bool(np.allclose(r, 0.5, atol=1e-2))
    True
    """
    # Work on a flat copy of squared magnitudes (original fixes: doctest was
    # Python 2 and passed a float size to randn; redundant inner import).
    v2 = np.ravel(v) ** 2
    v2.sort()
    v2 = v2[::-1]  # descending energies
    v_n2 = np.sqrt(np.sum(v2))  # ||v||
    cumsum2 = np.cumsum(v2)
    # Keep the largest components whose cumulative energy stays within target.
    select = cumsum2 <= ((v_n2 * ratio) ** 2)
    if select.sum() != 0:
        thres = np.sqrt(v2[select][-1])
    else:
        thres = 0
    return thres
def arr_threshold_from_norm2_ratio(v, ratio=.99):
    """Threshold *v* so that the kept components retain (approximately)
    a fraction *ratio* of the l2 norm:

        ||v[np.abs(v) >= t]|| / ||v|| <= ratio

    Returns the thresholded copy and the threshold.

    Example
    -------
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> v = rng.randn(10 ** 5)
    >>> v_t, t = arr_threshold_from_norm2_ratio(v, ratio=.5)
    >>> r = np.sqrt(np.sum(v_t ** 2)) / np.sqrt(np.sum(v ** 2))
    >>> bool(np.allclose(r, 0.5, atol=1e-2))
    True
    """
    t = arr_get_threshold_from_norm2_ratio(v, ratio=ratio)
    v_t = v.copy()
    v_t[np.abs(v) < t] = 0  # zero out the components below the threshold
    return v_t, t
def maps_similarity(maps):
    """Measures of similarity between maps.

    Parameters
    ----------
    maps : array (N, P)
        N maps of dimension P.

    Returns
    -------
    r_bar : float
        Average pairwise correlation (Fisher z-averaged).
    dice_bar : float
        Average pairwise sign-agreement index (NaN on failure).
        NOTE: matches / (|A| + |B|), i.e. half the classical Dice.
    fleiss_kappa_stat : float
        Fleiss' kappa over the per-voxel sign table (NaN when statsmodels
        is unavailable or the computation fails).
    """
    # Pairwise correlations (upper triangle only).
    R = np.corrcoef(maps)
    R = R[np.triu_indices_from(R, 1)]
    # Fisher z-transform, average, then back-transform.
    z_bar = np.mean(1. / 2. * np.log((1 + R) / (1 - R)))
    r_bar = (np.exp(2 * z_bar) - 1) / (np.exp(2 * z_bar) + 1)
    maps_sign = np.sign(maps)
    # Pairwise sign-agreement over jointly non-zero voxels.
    try:
        ij = [[i, j] for i in range(maps.shape[0]) for j in range(i + 1, maps.shape[0])]
        dices = list()
        for idx in ij:
            A, B = maps_sign[idx[0], :], maps_sign[idx[1], :]
            dices.append(float(np.sum((A == B)[(A != 0) & (B != 0)])) / (np.sum(A != 0) + np.sum(B != 0)))
        dice_bar = np.mean(dices)
    except Exception:  # fix: was a bare except (also swallowed SystemExit)
        dice_bar = np.nan
    try:
        # Optional dependency: degrade to NaN when statsmodels is missing.
        from statsmodels.stats.inter_rater import fleiss_kappa
        # Fleiss-Kappa over the per-voxel sign counts (0 / +1 / -1).
        table = np.zeros((maps_sign.shape[1], 3))
        table[:, 0] = np.sum(maps_sign == 0, 0)
        table[:, 1] = np.sum(maps_sign == 1, 0)
        table[:, 2] = np.sum(maps_sign == -1, 0)
        fleiss_kappa_stat = fleiss_kappa(table)
    except Exception:  # fix: bare except; np.NaN removed in NumPy 2.0
        fleiss_kappa_stat = np.nan
    return r_bar, dice_bar, fleiss_kappa_stat
"""
np.save("/tmp/betas.npy" ,betas)
betas = np.load("/tmp/betas.npy")
v = betas[1, :]
v_t, t = arr_threshold_from_norm2_ratio(v, ratio=.99)
ratio = np.sqrt(np.sum(v_t ** 2)) / np.sqrt(np.sum(v ** 2))
from brainomics import array_utils
betas_t = np.vstack([array_utils.arr_threshold_from_norm2_ratio(betas[i, :], .99)[0] for i in xrange(betas.shape[0])])
assert np.allclose(np.sqrt(np.sum(betas_t ** 2, 1)) /
np.sqrt(np.sum(betas ** 2, 1)), [0.99]*5)
""" | {"/nitk/image/img_brain_mask.py": ["/nitk/image/__init__.py"], "/nitk/image/__init__.py": ["/nitk/image/img_to_array.py", "/nitk/image/img_brain_mask.py"]} |
48,657 | GermanMoran/Tienda-django | refs/heads/main | /blog/urls.py | from django.urls import path
from . import views
urlpatterns = [
    path('', views.blog, name='blog'),
    # <categoria_id> is a URL parameter used as the database filter
    # criterion; declared as <int:...> because the model field is an
    # integer primary key.
    path('categoria/<int:categoria_id>/', views.categoria, name='categoria'),
]
| {"/carro/views.py": ["/carro/carro.py", "/tienda/models.py"]} |
48,658 | GermanMoran/Tienda-django | refs/heads/main | /carro/carro.py | class carro:
# Constructor que inicia las tareas mas importantes
def __init__(self, request):
self.request = request
# Con esto ya tenemos iniciada la sección
self.session = request.session
# Ahora debemos construir un carro de compra para esta seccion
carro = self.session.get("carro")
if not carro:
carro=self.session['carro']={}
else:
self.carro=carro
# Metodo para agregar productos al carro
def agregar(self, producto):
# Comprobamos si el producto no este en el carro
if (str(producto.id) not in self.carro.keys()):
self.carro[producto.id]= {
"producto_id":producto.id,
"nombre": producto.nombre,
"precio":str(producto.precio),
"cantidad":1,
"imagen":producto.imagen.url
}
# En caso de que el producto ya este en el carro
else:
for key,value in self.carro.items:
if key == str(producto.id):
value["cantidad"]=value["cantidad"]+1
# ya encontramos el articulo, no recorra mas
break
# Si agregamos un producto por primera vez y7o aumentamos la cantidad, se almacena en la seccion
# Funcion que nos permite guardar la seccion
self.guardar_carro()
def guardar_carro(self):
self.session["carro"]= self.carro
self.session.modified= True
def eliminar_producto(self,producto):
producto.id = str(producto.id)
if producto.id in self.carro:
del self.carro[producto.id]
# Volvemos a guaradar el carro
self.guardar_carro()
# Restar unidades
def restar_productos(self,producto):
for key,value in self.carro.items:
if key == str(producto.id):
value["cantidad"]=value["cantidad"]-1
# ya encontramos el articulo, no recorra mas
if value["cantidad"] < 1:
self.eliminar_producto(producto)
break
self.guardar_carro()
def limpiar_carro(self):
# Construyo un diccionario vacio
self.session['carro']={}
# Modificamos la seccion
self.session.modified= True
| {"/carro/views.py": ["/carro/carro.py", "/tienda/models.py"]} |
48,659 | GermanMoran/Tienda-django | refs/heads/main | /carro/context_processor.py |
def importe_carro(request):
    """Template context processor: total price of the session cart.

    Returns a dict exposing ``importe_total_precio`` to all templates.
    """
    # NOTE(review): the total starts at 125 in the original code; this looks
    # like a leftover test value (or a flat fee?) -- confirm with the author.
    total = 125
    if request.user.is_authenticated:
        # BUGFIX: use .get() so a fresh session without a "carro" key does
        # not raise KeyError.
        for key, value in request.session.get("carro", {}).items():
            total = total + (float(value["precio"]) * value["cantidad"])
    return {"importe_total_precio": total}
| {"/carro/views.py": ["/carro/carro.py", "/tienda/models.py"]} |
48,660 | GermanMoran/Tienda-django | refs/heads/main | /blog/models.py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# Blog category model.
class Categoria(models.Model):
    """A category that groups blog posts."""
    nombre = models.CharField(max_length=50)
    created = models.DateTimeField(auto_now_add=True)
    # BUG FIX: auto_now_add only stamps the row at creation, so "updated"
    # never changed. auto_now refreshes the timestamp on every save.
    # NOTE: this field-option change requires a new migration.
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'categoria'
        verbose_name_plural = 'categorias'

    def __str__(self):
        return self.nombre
# Blog post model.
class Post(models.Model):
    """A blog entry written by a user, tagged with one or more categories."""
    titulo = models.CharField(max_length=50)
    contenido = models.CharField(max_length=50)
    # The blog image is optional.
    imagen = models.ImageField(upload_to='blog', null=True, blank=True)
    # Deleting the User cascades to their posts.
    autor = models.ForeignKey(User, on_delete=models.CASCADE)
    # Many-to-many relation between posts and categories.
    categorias = models.ManyToManyField(Categoria)
    created = models.DateTimeField(auto_now_add=True)
    # BUG FIX: auto_now_add only stamps creation time; auto_now updates
    # the timestamp on every save, which is what "updated" means.
    # NOTE: this field-option change requires a new migration.
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'post'
        verbose_name_plural = 'posts'

    def __str__(self):
        return self.titulo
48,661 | GermanMoran/Tienda-django | refs/heads/main | /carro/views.py | from django.shortcuts import render
from .carro import carro
from tienda.models import Producto
from django.shortcuts import redirect
# Create your views here.
def agregar_producto(request, producto_id):
    """Add the selected product to the session cart, then return to the shop."""
    cart = carro(request)
    # Look up the product being added.
    selected = Producto.objects.get(id=producto_id)
    cart.agregar(producto=selected)
    return redirect("tienda")
# Remove a product from the cart.
def eliminar_productos(request, producto_id):
    """Delete the selected product from the session cart, then return to the shop."""
    cart = carro(request)
    # Look up the product being removed.
    selected = Producto.objects.get(id=producto_id)
    cart.eliminar_producto(producto=selected)
    return redirect("tienda")
def restar_producto(request, producto_id):
    """Subtract one unit of the selected product, then return to the shop."""
    cart = carro(request)
    # Look up the product whose quantity is decremented.
    selected = Producto.objects.get(id=producto_id)
    cart.restar_productos(producto=selected)
    return redirect("tienda")
def limpiarcarro(request, producto_id):
    """Empty the whole cart, then return to the shop.

    NOTE(review): producto_id is unused but kept for URL-conf compatibility.
    """
    cart = carro(request)
    cart.limpiar_carro()
    return redirect("tienda")
48,662 | GermanMoran/Tienda-django | refs/heads/main | /contacto/views.py | from django.shortcuts import render
from .form import FormularioContacto
# Create your views here.
def contacto(request):
    """Render the contact page with an unbound contact form."""
    return render(
        request,
        'contacto/contacto.html',
        {'miFormulario': FormularioContacto()},
    )
48,663 | GermanMoran/Tienda-django | refs/heads/main | /servicios/views.py | from django.shortcuts import render
from servicios.models import servicios
# Create your views here.
def Servicios(request):
    """Render the services page listing every service in the database."""
    todos = servicios.objects.all()
    return render(request, 'servicios/servicios.html', {'servicios': todos})
48,664 | GermanMoran/Tienda-django | refs/heads/main | /tienda/models.py | from django.db import models
# Create your models here.
# Product category model.
class CategoriaProd(models.Model):
    """A category that groups shop products."""
    nombre = models.CharField(max_length=50)
    created = models.DateTimeField(auto_now_add=True)
    # BUG FIX: auto_now_add only stamps the row at creation, so "updated"
    # never changed. auto_now refreshes the timestamp on every save.
    # NOTE: this field-option change requires a new migration.
    updated = models.DateTimeField(auto_now=True)

    # Singular and plural display names for the admin.
    class Meta:
        verbose_name = 'categoriaProd'
        verbose_name_plural = 'categoriasProd'

    def __str__(self):
        return self.nombre
# Shop product model.
class Producto(models.Model):
    """A product sold in the shop, belonging to one CategoriaProd."""
    nombre = models.CharField(max_length=50)
    # Deleting the category cascades to its products.
    categorias = models.ForeignKey(CategoriaProd, on_delete=models.CASCADE)
    # The product image is optional.
    imagen = models.ImageField(upload_to='tienda', null=True, blank=True)
    precio = models.FloatField()
    disponibilidad = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)
    # BUG FIX: auto_now_add only stamps creation time; auto_now updates
    # the timestamp on every save, which is what "updated" means.
    # NOTE: this field-option change requires a new migration.
    updated = models.DateTimeField(auto_now=True)

    # Singular and plural display names for the admin.
    class Meta:
        verbose_name = 'producto'
        verbose_name_plural = 'productos'

    def __str__(self):
        return self.nombre
48,673 | EHounslow/Metagenomics | refs/heads/main | /modelling/main.py | from sklearn.model_selection import train_test_split
from data_loader import extract_abundance_data
from dataset_selector import get_dataset
from variable_definer import get_variables
from disease_selector import get_disease_data
from modellers.random_forest_modeller import run_random_forest
from modellers.logistic_regression_modeller import run_logistic_regression
from modellers.decision_tree_regressor import run_decision_tree_regressor
# Load the raw abundance table and narrow it to the study of interest.
abundance_data = extract_abundance_data("data/abundance.txt")
data_subset = get_dataset("Quin_gut_liver_cirrhosis", abundance_data)

# Split the subset into combined, disease-only and control-only frames.
combined_disease_control, disease_target, control = get_disease_data("cirrhosis", data_subset)

# Build the lists of taxonomic and metadata variable names.
species, taxonomy, metadata = get_variables(combined_disease_control)

# Predictive features are the species abundances; the target is the
# disease label column.
x = combined_disease_control[species]
y = combined_disease_control["disease"]

# Hold out a validation split (fixed seed for reproducibility).
train_x, val_x, train_y, val_y = train_test_split(x, y, random_state=0)

# Fit the logistic regression model and score the disease-only samples.
logistic_reg_model = run_logistic_regression(x, y)
disease_x = disease_target[species]
disease_y = disease_target["disease"]
disease_predictions = logistic_reg_model.predict(disease_x)

# Run the tree-based models on the same splits.
run_decision_tree_regressor(train_x, val_x, train_y, val_y, x, y)
run_random_forest(train_x, train_y, val_x, val_y)
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,674 | EHounslow/Metagenomics | refs/heads/main | /modelling/modellers/decision_tree_regressor.py | from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
def get_mean_absolute_error(max_leaf_nodes, train_x, val_x, train_y, val_y):
    """
    Fit a decision tree capped at *max_leaf_nodes* and return its
    mean absolute error on the validation split.
    """
    tree = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    tree.fit(train_x, train_y)
    return mean_absolute_error(val_y, tree.predict(val_x))
def run_decision_tree_regressor(train_x, val_x, train_y, val_y, x, y):
    """Tune max_leaf_nodes on the validation split, then fit on all data.

    Each candidate leaf cap is scored by validation MAE; the best cap is
    used to fit a final tree on the full dataset.

    Returns:
        DecisionTreeRegressor: the final fitted model (previously the
        fitted model was silently discarded; returning it is a
        backward-compatible addition).
    """
    # Score a range of leaf-node caps by their validation MAE.
    candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
    mae_values = {
        node: get_mean_absolute_error(node, train_x, val_x, train_y, val_y)
        for node in candidate_max_leaf_nodes
    }
    # Pick the cap with the lowest MAE.
    best_tree_size = min(mae_values, key=mae_values.get)
    # random_state pinned so repeated runs give identical trees.
    final_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=0)
    # Now that the hyperparameter is chosen, fit on all the data.
    final_model.fit(x, y)
    return final_model
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,675 | EHounslow/Metagenomics | refs/heads/main | /tests/test_data_loader.py | from pandas.core.frame import DataFrame
from modelling.data_loader import extract_abundance_data
def test_extract_abundance_data_success():
    """The loader returns a DataFrame that contains the disease column."""
    # Arrange
    path = "data/abundance.txt"
    # Act
    result = extract_abundance_data(path)
    # Assert
    assert isinstance(result, DataFrame)
    assert "disease" in result.columns
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,676 | EHounslow/Metagenomics | refs/heads/main | /modelling/modellers/logistic_regression_modeller.py | from pandas.core.frame import DataFrame
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
def run_logistic_regression(x: DataFrame, y: DataFrame):
    """
    Run recursive feature elimination, then fit a logistic regression model.

    Args:
        x: (DataFrame): Feature data.
        y: (DataFrame): Target labels.

    Returns:
        LogisticRegression: the fitted model, ready for .predict().
    """
    logistic_reg_model = LogisticRegression(max_iter=10000)
    rfe = RFE(logistic_reg_model, n_features_to_select=10)
    rfe = rfe.fit(x, y)
    # Summarize the selection of the attributes
    print(rfe.support_)
    print(rfe.ranking_)
    # BUG FIX: the original called the nonexistent
    # LogisticRegression.summary() (AttributeError) and returned the
    # model unfitted, so callers' .predict() raised NotFittedError.
    # Fit the model before returning it.
    logistic_reg_model.fit(x, y)
    return logistic_reg_model
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,677 | EHounslow/Metagenomics | refs/heads/main | /tests/test_disease_selector.py | from modelling.dataset_selector import get_dataset
from modelling.data_loader import extract_abundance_data
from modelling.disease_selector import get_disease_data
def test_get_disease_data():
    """Disease labels are re-encoded to 1/0 with no raw strings left over."""
    # Arrange
    abundance_data = extract_abundance_data("data/abundance.txt")
    data_subset = get_dataset("Quin_gut_liver_cirrhosis", abundance_data)
    # Act
    combined_disease_control, disease_target, control = get_disease_data("cirrhosis", data_subset)
    # Assert: the string labels have been replaced by binary codes.
    labels = combined_disease_control["disease"].values
    assert "cirrhosis" not in labels
    assert "nd" not in labels
    assert 1 in labels
    assert 0 in labels
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.