#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import unittest
from src import boardcontrol
class Unit_tests_boardcontrol(unittest.TestCase):
"""The class containing the unit-test functions (boardcontrol).
These include setting up the board properly as well as piece
movement validation (including possible capturing).
"""
def __init__(self, *args, **kwargs):
"""Define a pristine board state (initial set-up).
This board state represents a board in which no piece
has been moved at all.
"""
unittest.TestCase.__init__(self, *args, **kwargs)
# (correct) initial set-up board
self.pristine_board = np.empty([8, 8], dtype = str)
# populate the pawns
for i in range(0, 8):
self.pristine_board[1, i] = 'p'
self.pristine_board[6, i] = 'P'
# add the rest of the board pieces
for j in range(0, 8, 7):
self.pristine_board[j, 0] = "r"
self.pristine_board[j, 1] = "n"
self.pristine_board[j, 2] = "b"
self.pristine_board[j, 3] = "q"
self.pristine_board[j, 4] = "k"
self.pristine_board[j, 5] = "b"
self.pristine_board[j, 6] = "n"
self.pristine_board[j, 7] = "r"
# convert the entries into upper case letters for the black pieces
for i in range(0, 8):
self.pristine_board[7, i] = self.pristine_board[7, i].upper()
def test_boardsetup(self):
"""Check, whether the initiated boardstate is correct.
The initiated board state via the imported boardcontrol
function is scrutinized for correctness.
"""
# intialise an unpopulated board
chess_board = np.empty([8, 8], dtype = str)
# reset (and populate / initiate) the board
chess_board = boardcontrol.reset_board(chess_board)
# assert the equality of the two boards (np.arrays)
self.assertIsNone(
np.testing.assert_array_equal(
chess_board,
self.pristine_board
)
)
def test_white_pawn_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) pawn movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[4, 4] = "p" # center pawn
chess_board[5, 5] = "R" # enemy piece to capture
chess_board[7, 4] = "p" # 'edge' pawn
# check the pawn (at [44]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"44p",
"white"
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
['54', '55']
)
)
# check the pawn (at [74]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"74p",
"white"
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
no_poss_moves
)
)
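    # Illustrative helper (an addition, not used by the original tests): the
    # docstrings in this class stress that boardcontrol.valid_move_for_piece()
    # gives no ordering guarantee, so an order-insensitive comparison could be
    # obtained by sorting both the returned and the expected moves first.
    @staticmethod
    def _sorted_moves(moves):
        """Sketch only: return the given move list as a sorted numpy string array."""
        return np.sort(np.asarray(moves, dtype=str))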
def test_white_rook_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) rook movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[0, 0] = "r" # edge rook1
chess_board[7, 7] = "r" # edge rook2
chess_board[3, 7] = "p" # edge rook2
chess_board[2, 0] = "P" # edge rook2
# check the rook (at [00]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"00r",
"white"
)
# define the possible moves by this rook
valid_possible_moves = np.array(
[
'01', '02', '03', '04',
'05', '06', '07', '10',
'20'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
# check the rook (at [77]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"77r",
"white"
)
# define the possible moves by this rook
valid_possible_moves = np.array(
[
'47', '57', '67', '70',
'71', '72', '73', '74',
'75', '76'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
def test_white_knight_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) knight movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[0, 0] = "n"
chess_board[7, 7] = "n"
chess_board[2, 1] = "n"
chess_board[1, 2] = "R"
chess_board[5, 6] = "R"
# check the knight (at [00]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"00n",
"white"
)
# define the possible moves by this knight
valid_possible_moves = np.array(
[
'12'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
# check the knight (at [77]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"77n",
"white"
)
# define the possible moves by this knight
valid_possible_moves = np.array(
[
'56', '65'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
def test_white_bishop_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) bishop movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[4, 4] = "b"
chess_board[0, 0] = "k"
chess_board[2, 6] = "R"
# check the bishop (at [44]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"44b",
"white"
)
# define the possible moves by this bishop
valid_possible_moves = np.array(
[
'11', '22', '26', '33',
'35', '53', '55', '62',
'66', '71', '77'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
def test_white_queen_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) queen movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[4, 4] = "q"
chess_board[0, 0] = "R"
chess_board[7, 7] = "p"
# check the queen (at [44]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"44q",
"white"
)
# define the possible moves by this queen
valid_possible_moves = np.array(
[
'00', '04', '11', '14',
'17', '22', '24', '26',
'33', '34', '35', '40',
'41', '42', '43', '45',
'46', '47', '53', '54',
'55', '62', '64', '66',
'71', '74'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
def test_white_king_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) king movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[4, 4] = "k"
chess_board[3, 3] = "P"
chess_board[5, 5] = "r"
# check the king (at [44]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"44k",
"white"
)
# define the possible moves by this king
valid_possible_moves = np.array(
[
'33', '34', '35', '43',
'45', '53', '54'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
# change the board state
chess_board[7, 7] = "k"
chess_board[6, 6] = "q"
chess_board[6, 7] = "P"
# check the king (at [77]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"77k",
"white"
)
# define the possible moves by this king
valid_possible_moves = np.array(
[
'67', '76'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
def test_black_pawn_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) pawn movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[4, 4] = "P" # center pawn
chess_board[3, 5] = "r" # enemy piece to capture
chess_board[0, 4] = "P" # 'edge' pawn
# check the pawn (at [44]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"44P",
"black"
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
['34', '35']
)
)
# check the pawn (at [04]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"04P",
"black"
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
no_poss_moves
)
)
def test_black_rook_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) rook movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[0, 0] = "R" # edge rook1
chess_board[7, 7] = "R" # edge rook2
chess_board[3, 7] = "P" # edge rook2
chess_board[2, 0] = "p" # edge rook2
# check the rook (at [00]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"00R",
"black"
)
# define the possible moves by this rook
valid_possible_moves = np.array(
[
'01', '02', '03', '04',
'05', '06', '07', '10',
'20'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
# check the rook (at [77]):
possible_moves = boardcontrol.valid_move_for_piece(
chess_board,
"77R",
"black"
)
# define the possible moves by this rook
valid_possible_moves = np.array(
[
'47', '57', '67', '70',
'71', '72', '73', '74',
'75', '76'
]
)
# assert the possible moves
self.assertIsNone(
np.testing.assert_array_equal(
possible_moves,
valid_possible_moves
)
)
def test_black_knight_movement(self):
"""This functions verifies the (possible) movements of pieces.
Following functions are tested:
o) boardcontrol.valid_move_for_piece(). Please note that the
ordering of the results from this function are dependent of
its inner workings, i.e., they are not ordered in any kind!
"""
"""
Define an (empty) array: represents no possible
moves returned by the function
boardcontrol.valid_move_for_piece().
"""
no_poss_moves = np.empty([0], dtype = str)
''' check (possible) knight movements: '''
# create a board without any pieces on it
chess_board = np.empty([8, 8], dtype = str)
# populate the board
chess_board[0, 0] = "N"
chess_board[7, 7] = "N"
chess_board[2, 1] = "N"
chess_board[1, 2] = "r"
chess_board[5, 6] = "r"
# check the knight (at [00]):
possible_moves = boardcontrol.valid_move_for_piece(chess_board, "00N", "black")
class CartesianProductStrategy(Strategy[CombinatorialClassType, CombinatorialObjectType]):
"""
The CartesianProductStrategy is a subclass of Strategy. The constructor is
CartesianProduct. Such strategies by default assume
ignore_parent=True, inferrable=False, possibly_empty=False, and
workable=True.
The bijection maps an object a -> (b1, ..., bk) where bi is the object in
the child at index i returned by the decomposition function.
"""
def __init__(
self,
ignore_parent: bool = True,
inferrable: bool = False,
possibly_empty: bool = False,
workable: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
@staticmethod
def can_be_equivalent() -> bool:
return True
def constructor(
self,
comb_class: CombinatorialClassType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> Constructor:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("Strategy does not apply")
return CartesianProduct(
comb_class,
children,
extra_parameters=self.extra_parameters(comb_class, children),
)
@staticmethod
def get_op_symbol() -> str:
"""
Return the symbol to use for '+' when pretty printing rules of the
form a '=' b '+' c. Your choice should be a single character.
"""
return "x"
class DisjointUnionStrategy(Strategy[CombinatorialClassType, CombinatorialObjectType]):
"""
The DisjointUnionStrategy is a subclass of Strategy. The constructor used
is DisjointUnion.
The bijection maps an object a -> (None, ..., b, ..., None) where b is at
the index of the child it belongs to.
"""
def __init__(
self,
ignore_parent: bool = False,
inferrable: bool = True,
possibly_empty: bool = True,
workable: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
@staticmethod
def can_be_equivalent() -> bool:
return True
def constructor(
self,
comb_class: CombinatorialClassType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> DisjointUnion:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("Strategy does not apply")
return DisjointUnion(
comb_class,
children,
extra_parameters=self.extra_parameters(comb_class, children),
)
@staticmethod
def backward_map_index(objs: Tuple[Optional[CombinatorialObjectType], ...]) -> int:
"""
Return the index of the comb_class that the sub_object returned.
"""
for idx, obj in enumerate(objs):
if obj is not None:
return idx
raise ObjectMappingError(
"For a disjoint union strategy, an object O is mapped to the tuple"
"with entries being None, except at the index of the child which "
"contains O, where it should be O."
)
def backward_map(
self,
comb_class: CombinatorialClassType,
objs: Tuple[Optional[CombinatorialObjectType], ...],
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> CombinatorialObjectType:
"""
This method enables us to generate and sample objects.
If the strategy is a direct bijection, the implementation below works as is.
"""
if children is None:
children = self.decomposition_function(comb_class)
idx = DisjointUnionStrategy.backward_map_index(objs)
return cast(CombinatorialObjectType, objs[idx])
@staticmethod
def get_op_symbol() -> str:
"""
Return the symbol to use for '+' when pretty printing rules of the
form a '=' b '+' c. Your choice should be a single character.
"""
return "+"
class SymmetryStrategy(
DisjointUnionStrategy[CombinatorialClassType, CombinatorialObjectType]
):
"""General representation for a symmetry strategy."""
def __init__(
self,
ignore_parent: bool = False,
inferrable: bool = False,
possibly_empty: bool = False,
workable: bool = False,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
class VerificationStrategy(
AbstractStrategy[CombinatorialClassType, CombinatorialObjectType]
):
"""
For a VerificationStrategy you must implement the methods:
- verified: Return True if the combinatorial class is
verified by the strategy.
- pack: The pack is used to count and generate the
objects. If the strategy doesn't have a CSS
strategy pack that can be used to enumerate
verified combinatorial classes, then you need
to implement the methods count_objects_of_size,
generate_objects_of_size, and get_genf.
- __repr__ and __str__: This is mostly for printing purposes!
- from_dict: A method that can recreate the class. The dict
passed is empty. If your strategy needs extra
parameters to be recreated, you should override the
to_jsonable method.
If your verification strategy is for the atoms, consider using the
AtomStrategy, relying on CombinatorialClass methods.
"""
def __init__(
self, ignore_parent: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=False,
possibly_empty=False,
workable=False,
)
def __call__(
self,
comb_class: CombinatorialClassType,
children: Tuple[CombinatorialClassType, ...] = None,
**kwargs
) -> VerificationRule[CombinatorialClassType, CombinatorialObjectType]:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("The combinatorial class is not verified")
return VerificationRule(self, comb_class, children)
@staticmethod
def can_be_equivalent() -> bool:
return False
def pack(self, comb_class: CombinatorialClassType) -> "StrategyPack":
"""
Returns a StrategyPack that finds a proof tree for the comb_class in
which the verification strategies used are "simpler".
The pack is assumed to produce a finite universe.
"""
raise InvalidOperationError(f"can't find specification for {self}")
@abc.abstractmethod
def verified(self, comb_class: CombinatorialClassType) -> bool:
"""
Returns True if the enumeration strategy works for the combinatorial class.
"""
def get_specification(
self, comb_class: CombinatorialClassType
) -> "CombinatorialSpecification[CombinatorialClassType, CombinatorialObjectType]":
"""
Returns a combinatorial specification for the combinatorial class.
Raises a `StrategyDoesNotApply` if no specification can be found,
e.g. if it is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
# pylint: disable=import-outside-toplevel
from ..comb_spec_searcher import CombinatorialSpecificationSearcher
searcher = CombinatorialSpecificationSearcher(comb_class, self.pack(comb_class))
specification = searcher.auto_search()
assert specification is not None, StrategyDoesNotApply(
"Cannot find a specification"
)
return specification
def get_genf(
self,
comb_class: CombinatorialClassType,
funcs: Optional[Dict[CombinatorialClassType, Function]] = None,
) -> Expr:
"""
Returns the generating function for the combinatorial class.
Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
return self.get_specification(comb_class).get_genf()
def decomposition_function(
self, comb_class: CombinatorialClassType
) -> Union[Tuple[CombinatorialClassType, ...], None]:
"""
A combinatorial class C is marked as verified by returning a rule
C -> (). This ensures that C is in a combinatorial specification as it
appears exactly once on the left hand side.
The function returns None if the verification strategy doesn't apply.
"""
if self.verified(comb_class):
return tuple()
return None
def count_objects_of_size(
self, comb_class: CombinatorialClassType, n: int, **parameters: int
) -> int:
"""
A method to count the objects.
Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
return int(
self.get_specification(comb_class).count_objects_of_size(n, **parameters)
)
def generate_objects_of_size(
self, comb_class: CombinatorialClassType, n: int, **parameters: int
) -> Iterator[CombinatorialObjectType]:
"""
A method to generate the objects.
Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
yield from self.get_specification(comb_class).generate_objects_of_size(
n, **parameters
)
def random_sample_object_of_size(
self, comb_class: CombinatorialClassType, n: int, **parameters: int
) -> CombinatorialObjectType:
"""
A method to sample uniformly at random from a verified combinatorial class.
Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
return self.get_specification(comb_class).random_sample_object_of_size(
n, **parameters
)
def to_jsonable(self) -> dict:
d = super().to_jsonable()
d.pop("inferrable")
d.pop("possibly_empty")
d.pop("workable")
return d
class AtomStrategy(VerificationStrategy[CombinatorialClass, CombinatorialObject]):
"""
A subclass for when a combinatorial class is an atom - meaning consisting
of a single object.
"""
def __init__(self):
super().__init__(ignore_parent=True)
@staticmethod
def count_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> int:
"""
Verification strategies must contain a method to count the objects.
"""
if comb_class.extra_parameters:
raise NotImplementedError
if n == comb_class.minimum_size_of_object():
return 1
return 0
def get_genf(
self,
comb_class: CombinatorialClass,
funcs: Optional[Dict[CombinatorialClass, Function]] = None,
) -> Expr:
if comb_class.extra_parameters:
raise NotImplementedError
if not self.verified(comb_class):
raise StrategyDoesNotApply("Can't find generating functon for non-atom.")
x = var("x")
return x ** comb_class.minimum_size_of_object()
@staticmethod
def generate_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> Iterator[CombinatorialObject]:
"""
Verification strategies must contain a method to generate the objects.
"""
if comb_class.extra_parameters:
raise NotImplementedError
if n == comb_class.minimum_size_of_object():
yield from comb_class.objects_of_size(n)
@staticmethod
def random_sample_object_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> CombinatorialObject:
if comb_class.extra_parameters:
raise NotImplementedError
if n == comb_class.minimum_size_of_object():
obj: CombinatorialObject = next(comb_class.objects_of_size(n))
return obj
@staticmethod
def verified(comb_class: CombinatorialClass) -> bool:
return bool(comb_class.is_atom())
@staticmethod
def formal_step() -> str:
return "is atom"
@staticmethod
def pack(comb_class: CombinatorialClass) -> "StrategyPack":
raise InvalidOperationError("No pack for the empty strategy.")
def to_jsonable(self) -> dict:
d: dict = super().to_jsonable()
d.pop("ignore_parent")
return d
@classmethod
def from_dict(cls, d: dict) -> "AtomStrategy":
assert not d
return cls()
def __repr__(self) -> str:
return self.__class__.__name__ + f"(ignore_parent={self.ignore_parent})"
def __str__(self) -> str:
return "verify atoms"
class EmptyStrategy(VerificationStrategy[CombinatorialClass, CombinatorialObject]):
"""
A subclass for when a combinatorial class is equal to the empty set.
"""
def __init__(self):
super().__init__(ignore_parent=True)
@staticmethod
def count_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> int:
"""
Verification strategies must contain a method to count the objects.
"""
return 0
def get_genf(
self,
def GetLoop(self,keyname):
loop_no = self.FindLoop(keyname)
if loop_no >= 0:
return self.loops[loop_no]
else:
raise KeyError('%s is not in any loop' % keyname)
def AddToLoop(self,dataname,loopdata):
thisloop = self.GetLoop(dataname)
for itemname,itemvalue in loopdata.items():
thisloop[itemname] = itemvalue
def AddToLoop(self,dataname,loopdata):
"""*Deprecated*. Use `AddItem` followed by calls to `AddLoopName`.
Add multiple columns to the loop containing `dataname`. `loopdata` is a
collection of (key,value) pairs, where `key` is the new dataname and `value`
is a list of values for that dataname"""
self.update(loopdata)
for one_name in loopdata:
self.AddLoopName(dataname,one_name)
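# Illustrative sketch (an assumption about typical usage, not part of PyCIFRW):
# the non-deprecated replacement for AddToLoop described in the docstring above
# is to add the new column with AddItem and then attach it to an existing loop
# with AddLoopName.
def _example_add_column_to_loop(block, anchor_name, new_name, new_values):
    """Sketch only: add `new_values` under `new_name` and co-loop it with `anchor_name`."""
    block.AddItem(new_name, new_values)
    block.AddLoopName(anchor_name, new_name)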
class StarBlock(object):
def __init__(self,data = (), maxoutlength=2048, wraplength=80, overwrite=True,
characterset='ascii',maxnamelength=-1):
self.block = {} #the actual data storage (lower case keys)
self.loops = {} #each loop is indexed by a number and contains a list of datanames
self.item_order = [] #lower case, loops referenced by integer
self.formatting_hints = {}
self.true_case = {} #transform lower case to supplied case
self.provide_value = False #prefer string version always
self.dictionary = None #DDLm dictionary
self.popout = False #used during load iteration
self.curitem = -1 #used during iteration
self.cache_vals = True #store all calculated values
self.maxoutlength = maxoutlength
self.setmaxnamelength(maxnamelength) #to enforce CIF limit of 75 characters
self.set_characterset(characterset) #to check input names
self.wraplength = wraplength
self.overwrite = overwrite
self.string_delimiters = ["'",'"',"\n;"] #universal CIF set
self.list_delimiter = " " #CIF2 default
self.wrapper = textwrap.TextWrapper()
if isinstance(data,(tuple,list)):
for item in data:
self.AddLoopItem(item)
elif isinstance(data,StarBlock):
self.block = data.block.copy()
self.item_order = data.item_order[:]
self.true_case = data.true_case.copy()
# loops as well
self.loops = data.loops.copy()
def setmaxnamelength(self,maxlength):
"""Set the maximum allowable dataname length (-1 for no check)"""
self.maxnamelength = maxlength
if maxlength > 0:
bad_names = [a for a in self.keys() if len(a)>self.maxnamelength]
if len(bad_names)>0:
raise StarError('Datanames too long: ' + repr( bad_names ))
def set_characterset(self,characterset):
"""Set the characterset for checking datanames: may be `ascii` or `unicode`"""
self.characterset = characterset
if characterset == 'ascii':
self.char_check = re.compile("[][ \n\r\t!%&\(\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\|~\"#$';_-]+",re.M)
elif characterset == 'unicode':
if sys.maxunicode < 1114111:
self.char_check = re.compile(u"[][ \n\r\t!%&\(\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\|~\"#$';_\u00A0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFD-]+",re.M)
else:
self.char_check = re.compile(u"[][ \n\r\t!%&\(\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\|~\"#$';_\u00A0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFD\U00010000-\U0010FFFD-]+",re.M)
def __str__(self):
return self.printsection()
def __setitem__(self,key,value):
if key == "saves":
raise StarError("""Setting the saves key is deprecated. Add the save block to
an enclosing block collection (e.g. CIF or STAR file) with this block as child""")
self.AddItem(key,value)
def __getitem__(self,key):
if key == "saves":
raise StarError("""The saves key is deprecated. Access the save block from
the enclosing block collection (e.g. CIF or STAR file object)""")
try:
rawitem,is_value = self.GetFullItemValue(key)
except KeyError:
if self.dictionary:
# send the dictionary the required key and a pointer to us
try:
new_value = self.dictionary.derive_item(key,self,store_value=self.cache_vals,allow_defaults=False)
except StarDerivationFailure: #try now with defaults included
try:
new_value = self.dictionary.derive_item(key,self,store_value=self.cache_vals,allow_defaults=True)
except StarDerivationFailure as s:
print("In StarBlock.__getitem__, " + repr(s))
raise KeyError('No such item: %s' % key)
print('Set %s to derived value %s' % (key, repr(new_value)))
return new_value
else:
raise KeyError('No such item: %s' % key)
# we now have an item, we can try to convert it to a number if that is appropriate
# note numpy values are never stored but are converted to lists
if not self.dictionary or not key in self.dictionary: return rawitem
print('%s: is_value %s provide_value %s value %s' % (key,repr( is_value ),repr( self.provide_value ),repr( rawitem )))
if is_value:
if self.provide_value: return rawitem
else:
print('Turning %s into string' % repr( rawitem ))
return self.convert_to_string(key)
else: # a string
if self.provide_value and ((not isinstance(rawitem,list) and rawitem != '?' and rawitem != ".") or \
(isinstance(rawitem,list) and '?' not in rawitem and '.' not in rawitem)):
return self.dictionary.change_type(key,rawitem)
elif self.provide_value: # catch the question marks
do_calculate = False
if isinstance(rawitem,(list,tuple)):
known = [a for a in rawitem if a != '?']
if len(known) == 0: #all questions
do_calculate = True
elif rawitem == '?':
do_calculate = True
if do_calculate:
# remove old value
del self[key]
try:
new_value = self.dictionary.derive_item(key,self,store_value=True,allow_defaults=False)
except StarDerivationFailure as s:
try:
new_value = self.dictionary.derive_item(key,self,store_value=True,allow_defaults=True)
except StarDerivationFailure as s:
print("Could not turn %s into a value:" + repr(s))
return rawitem
else:
print('Set %s to derived value %s' % (key, repr( new_value )))
return new_value
return rawitem #can't do anything
def __delitem__(self,key):
self.RemoveItem(key)
def __len__(self):
blen = len(self.block)
return blen
def __nonzero__(self):
if self.__len__() > 0: return 1
return 0
# keys returns all internal keys
def keys(self):
return list(self.block.keys()) #always lower case
def values(self):
return [self[a] for a in self.keys()]
def items(self):
return list(zip(self.keys(),self.values()))
def __contains__(self,key):
if isinstance(key,(unicode,str)) and key.lower() in self.keys():
return True
return False
def has_key(self,key):
return key in self
def has_key_or_alias(self,key):
"""Check if a dataname or alias is available in the block"""
initial_test = key in self
if initial_test: return True
elif self.dictionary:
aliases = [k for k in self.dictionary.alias_table.get(key,[]) if self.has_key(k)]
if len(aliases)>0:
return True
return False
def get(self,key,default=None):
if key in self:
retval = self.__getitem__(key)
else:
retval = default
return retval
def clear(self):
self.block = {}
self.loops = {}
self.item_order = []
self.true_case = {}
# doesn't appear to work
def copy(self):
newcopy = StarBlock()
newcopy.block = self.block.copy()
newcopy.loops = []
newcopy.item_order = self.item_order[:]
newcopy.true_case = self.true_case.copy()
newcopy.loops = self.loops.copy()
# return self.copy.im_class(newcopy) #catch inheritance
return newcopy
def update(self,adict):
for key in adict.keys():
self.AddItem(key,adict[key])
def GetItemPosition(self,itemname):
"""A utility function to get the numerical order in the printout
of `itemname`. An item has coordinate `(loop_no,pos)` with
the top level having a `loop_no` of -1. If an integer is passed to
the routine then it will return the position of the loop
referenced by that number."""
if isinstance(itemname,int):
# return loop position
return (-1, self.item_order.index(itemname))
if not itemname in self:
raise ValueError('No such dataname %s' % itemname)
testname = itemname.lower()
if testname in self.item_order:
return (-1,self.item_order.index(testname))
loop_no = self.FindLoop(testname)
loop_pos = self.loops[loop_no].index(testname)
return loop_no,loop_pos
def ChangeItemOrder(self,itemname,newpos):
"""Move the printout order of `itemname` to `newpos`. If `itemname` is
in a loop, `newpos` refers to the order within the loop."""
if isinstance(itemname,(unicode,str)):
true_name = itemname.lower()
else:
true_name = itemname
loopno = self.FindLoop(true_name)
if loopno < 0: #top level
self.item_order.remove(true_name)
self.item_order.insert(newpos,true_name)
else:
self.loops[loopno].remove(true_name)
self.loops[loopno].insert(newpos,true_name)
def GetItemOrder(self):
"""Return a list of datanames in the order in which they will be printed. Loops are
referred to by numerical index"""
return self.item_order[:]
def AddItem(self,key,value,precheck=False):
"""Add dataname `key` to block with value `value`. `value` may be
a single value, a list or a tuple. If `precheck` is False (the default),
all values will be checked and converted to unicode strings as necessary. If
`precheck` is True, this checking is bypassed. No checking is necessary
when values are read from a CIF file as they are already in correct form."""
if not isinstance(key,(unicode,str)):
raise TypeError('Star datanames are strings only (got %s)' % repr( key ))
key = unicode(key) #everything is unicode internally
if not precheck:
self.check_data_name(key,self.maxnamelength) # make sure no nasty characters
# check for overwriting
if key in self:
if not self.overwrite:
raise StarError( 'Attempt to insert duplicate item name %s' % key)
if not precheck: #need to sanitise
regval,empty_val = self.regularise_data(value)
pure_string = check_stringiness(regval)
self.check_item_value(regval)
else:
regval,empty_val = value,None
pure_string = True
# update ancillary information first
lower_key = key.lower()
if not lower_key in self and self.FindLoop(lower_key)<0: #need to add to order
self.item_order.append(lower_key)
# always remove from our case table in case the case is different
try:
del self.true_case[lower_key]
except KeyError:
pass
self.true_case[lower_key] = key
if pure_string:
self.block.update({lower_key:[regval,empty_val]})
else:
self.block.update({lower_key:[empty_val,regval]})
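    # Illustrative usage (an assumption, not part of PyCIFRW): AddItem accepts
    # both scalar and list values; list-valued datanames only become loop
    # columns once CreateLoop (or AddLoopName) groups them, as sketched below
    # with standard CIF datanames.
    def _example_additem_usage(self):
        """Sketch only: add a scalar item and a one-column loop."""
        self.AddItem('_cell_length_a', '5.959')
        self.AddItem('_atom_site_label', ['C1', 'O1', 'N1'])
        self.CreateLoop(['_atom_site_label'])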
def AddLoopItem(self,incomingdata,precheck=False,maxlength=-1):
"""*Deprecated*. Use `AddItem` followed by `CreateLoop` if
necessary."""
# print "Received data %s" % `incomingdata`
# we accept tuples, strings, lists and dicts!!
# Direct insertion: we have a string-valued key, with an array
# of values -> single-item into our loop
if isinstance(incomingdata[0],(tuple,list)):
# a whole loop
keyvallist = zip(incomingdata[0],incomingdata[1])
for key,value in keyvallist:
self.AddItem(key,value)
self.CreateLoop(incomingdata[0])
elif not isinstance(incomingdata[0],(unicode,str)):
raise TypeError('Star datanames are strings only (got %s)' % repr( incomingdata[0] ))
else:
self.AddItem(incomingdata[0],incomingdata[1])
def check_data_name(self,dataname,maxlength=-1):
if maxlength > 0:
self.check_name_length(dataname,maxlength)
if dataname[0]!='_':
raise StarError( 'Dataname ' + dataname + ' does not begin with _')
if self.characterset=='ascii':
if len ([a for a in dataname if ord(a) < 33 or ord(a) > 126]) > 0:
raise StarError( 'Dataname ' + dataname +
self.options['coron_shift_x'] = coron_shift_x_orig
self.options['coron_shift_y'] = coron_shift_y_orig
self.options['bar_offset'] = bar_offset_orig
# Crop distorted borders
if add_distortion:
osamp = hdul[0].header['DET_SAMP']
npix_over = npix_extra * osamp
hdul[0].data = hdul[0].data[npix_over:-npix_over,npix_over:-npix_over]
hdul[2].data = hdul[2].data[npix_over:-npix_over,npix_over:-npix_over]
hdul[1].data = hdul[1].data[npix_extra:-npix_extra,npix_extra:-npix_extra]
hdul[3].data = hdul[3].data[npix_extra:-npix_extra,npix_extra:-npix_extra]
# Check if we set return_hdul=False
if return_hdul:
res = hdul
else:
# If just returning a single image, determine oversample and distortion
res = hdul[2].data if add_distortion else hdul[0].data
if not return_oversample:
res = frebin(res, scale=1/self.oversample)
return res
def _inst_copy(self):
""" Return a copy of the current instrument class. """
# Change log levels to WARNING for webbpsf_ext, WebbPSF, and POPPY
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
init_params = {
'filter' : self.filter,
'pupil_mask': self.pupil_mask,
'image_mask': self.image_mask,
'fov_pix' : self.fov_pix,
'oversample': self.oversample,
'auto_gen_coeffs': False
}
# Init same subclass
if self.name=='NIRCam':
inst = NIRCam_ext(**init_params)
elif self.name=='MIRI':
inst = MIRI_ext(**init_params)
# Get OPD info
inst.pupilopd = deepcopy(self.pupilopd)
inst.pupil = deepcopy(self.pupil)
# Detector and aperture info
inst._detector = self._detector
inst._detector_position = self._detector_position
inst._aperturename = self._aperturename
# Other options
inst.options = self.options
# PSF coeff info
inst.use_legendre = self.use_legendre
inst._ndeg = self._ndeg
inst._npsf = self._npsf
inst._quick = self._quick
# SI WFE and distortions
inst.include_si_wfe = self.include_si_wfe
inst.include_distortions = self.include_distortions
### Instrument-specific parameters
# Grism order for NIRCam
try: inst._grism_order = self._grism_order
except: pass
# ND square for NIRCam
try: inst._ND_acq = self._ND_acq
except: pass
setup_logging(log_prev, verbose=False)
return inst
def _wrap_coeff_for_mp(args):
"""
Internal helper routine for parallelizing computations across multiple processors
for multiple WebbPSF monochromatic calculations.
args => (inst,w,fov_pix,oversample)
"""
# No multiprocessing for monochromatic wavelengths
mp_prev = poppy.conf.use_multiprocessing
poppy.conf.use_multiprocessing = False
inst, w = args
try:
hdu_list = inst.calc_psf(monochromatic=w*1e-6, crop_psf=True)
except Exception as e:
print('Caught exception in worker thread (w = {}):'.format(w))
# This prints the type, value, and stack trace of the
# current exception being handled.
traceback.print_exc()
print('')
#raise e
poppy.conf.use_multiprocessing = mp_prev
return None
# Return to previous setting
poppy.conf.use_multiprocessing = mp_prev
# Return distorted PSF
if inst.include_distortions:
hdu = hdu_list[2]
else:
hdu = hdu_list[0]
# Rather than storing the oversampling factor separately, record it in the header
hdu.header['OSAMP'] = (inst.oversample, 'Image oversample vs det')
return hdu
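# Illustrative sketch (an assumption, not part of webbpsf_ext): the per-pixel
# polynomial fit that _gen_psf_coeff() describes below can be pictured with
# plain numpy. `images` is a stack of monochromatic PSFs of shape (npsf, ny, nx)
# sampled at wavelengths `waves`; the result is a coefficient cube of shape
# (deg+1, ny, nx). webbpsf_ext itself uses jl_poly_fit(), which additionally
# supports Legendre polynomials and wavelength re-mapping.
def _example_poly_fit_cube(waves, images, deg):
    """Sketch only: least-squares polynomial fit of each oversampled pixel."""
    import numpy as np
    npsf, ny, nx = images.shape
    flat = images.reshape(npsf, -1)                    # one column per pixel
    coeffs = np.polyfit(np.asarray(waves), flat, deg)  # shape (deg+1, ny*nx)
    return coeffs.reshape(deg + 1, ny, nx)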
def _gen_psf_coeff(self, nproc=None, wfe_drift=0, force=False, save=True,
return_results=False, return_extras=False, **kwargs):
"""Generate PSF coefficients
Creates a set of coefficients that will generate simulated PSFs for any
arbitrary wavelength. This function first simulates a number of evenly-
spaced PSFs throughout the specified bandpass (or the full channel).
An nth-degree polynomial is then fit to each oversampled pixel using
a linear-least squares fitting routine. The final set of coefficients
for each pixel is returned as an image cube. The returned set of
coefficient are then used to produce PSF via `calc_psf_from_coeff`.
Useful for quickly generated imaging and dispersed PSFs for multiple
spectral types.
Parameters
----------
nproc : int or None
Manual setting of number of processor cores to break up PSF calculation.
If set to None, this is determined based on the requested PSF size,
the amount of available memory, and the number of hardware processor cores. The automatic
calculation endeavors to leave a number of resources available to the
user so as to not crash the user's machine.
wfe_drift : float
Wavefront error drift amplitude in nm.
force : bool
Forces a recalculation of PSF even if saved PSF exists. (default: False)
save : bool
Save the resulting PSF coefficients to a file? (default: True)
return_results : bool
By default, results are saved as object the attributes `psf_coeff` and
`psf_coeff_header`. If return_results=True, results are instead returned
as function outputs and will not be saved to the attributes. This is mostly
used for successive coeff simulations to determine varying WFE drift or
focal plane dependencies.
return_extras : bool
Additionally returns a dictionary of monochromatic PSFs images and their
corresponding wavelengths for debugging purposes. Can be used with or without
`return_results`. If `return_results=False`, then only this dictionary is
returned, otherwise if `return_results=False` then returns everything as a
3-element tuple (psf_coeff, psf_coeff_header, extras_dict).
"""
save_name = self.save_name
outfile = str(self.save_dir / save_name)
# Load data from already saved FITS file
if os.path.exists(outfile) and (not force):
if return_extras:
_log.warn("return_extras only valid if coefficient files does not exist or force=True")
_log.info(f'Loading {outfile}')
hdul = fits.open(outfile)
data = hdul[0].data.astype(np.float64)
hdr = hdul[0].header
hdul.close()
# Output if return_results=True, otherwise save to attributes
if return_results:
return data, hdr
else:
del self.psf_coeff, self.psf_coeff_header
self.psf_coeff = data
self.psf_coeff_header = hdr
return
temp_str = 'and saving' if save else 'but not saving'
_log.info(f'Generating {temp_str} PSF coefficient')
# Change log levels to WARNING for webbpsf_ext, WebbPSF, and POPPY
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
# w1 = self.bandpass.wave.min() / 1e4
# w2 = self.bandpass.wave.max() / 1e4
w1, w2 = self.wave_fit
npsf = self.npsf
waves = np.linspace(w1, w2, npsf)
fov_pix = self.fov_pix
oversample = self.oversample
# Get OPD info and convert to OTE LM
opd_dict = self.get_opd_info(HDUL_to_OTELM=True)
opd_name = opd_dict['opd_name']
opd_num = opd_dict['opd_num']
opd_str = opd_dict['opd_str']
opd = opd_dict['pupilopd']
# Drift OPD
if wfe_drift!=0:
wfe_dict = self.drift_opd(wfe_drift, opd=opd)
else:
wfe_dict = {'therm':0, 'frill':0, 'iec':0, 'opd':opd}
opd_new = wfe_dict['opd']
# Save copies
pupilopd_orig = deepcopy(self.pupilopd)
pupil_orig = deepcopy(self.pupil)
self.pupilopd = opd_new
self.pupil = opd_new
# How many processors to split into?
if nproc is None:
nproc = nproc_use(fov_pix, oversample, npsf)
_log.debug('nprocessors: {}; npsf: {}'.format(nproc, npsf))
# Make a paired down copy of self with limited data for
# copying to multiprocessor theads. This reduces memory
# swapping overheads and limitations.
inst_copy = _inst_copy(self) if nproc > 1 else self
t0 = time.time()
# Setup the multiprocessing pool and arguments to pass to each pool
worker_arguments = [(inst_copy, wlen) for wlen in waves]
if nproc > 1:
hdu_arr = []
try:
with mp.Pool(nproc) as pool:
for res in tqdm(pool.imap(_wrap_coeff_for_mp, worker_arguments), total=npsf, desc='Single PSFs', leave=False):
hdu_arr.append(res)
pool.close()
if hdu_arr[0] is None:
raise RuntimeError('Returned None values. Issue with multiprocess or WebbPSF??')
except Exception as e:
_log.error('Caught an exception during multiprocess.')
_log.info('Closing multiprocess pool.')
pool.terminate()
pool.close()
raise e
else:
_log.info('Closing multiprocess pool.')
del inst_copy, worker_arguments
else:
# Pass arguments to the helper function
hdu_arr = []
for wa in tqdm(worker_arguments, desc='Single PSFs', leave=False):
hdu = _wrap_coeff_for_mp(wa)
if hdu is None:
raise RuntimeError('Returned None values. Issue with WebbPSF??')
hdu_arr.append(hdu)
t1 = time.time()
# Ensure PSF sum is not larger than 1.0
# This can sometimes occur for distorted PSFs near edges
for hdu in hdu_arr:
data_sum = hdu.data.sum()
# print(data_sum)
if data_sum>1:
hdu.data /= data_sum
# Reset pupils
self.pupilopd = pupilopd_orig
self.pupil = pupil_orig
# Reset to original log levels
setup_logging(log_prev, verbose=False)
time_string = 'Took {:.2f} seconds to generate WebbPSF images'.format(t1-t0)
_log.info(time_string)
# Extract image data from HDU array
images = []
for hdu in hdu_arr:
images.append(hdu.data)
# Turn results into a numpy array (npsf,ny,nx)
images = np.asarray(images)
# Simultaneous polynomial fits to all pixels using linear least squares
use_legendre = self.use_legendre
ndeg = self.ndeg
coeff_all = jl_poly_fit(waves, images, deg=ndeg, use_legendre=use_legendre, lxmap=[w1,w2])
################################
# Create HDU and header
################################
hdu = fits.PrimaryHDU(coeff_all)
hdr = hdu.header
head_temp = hdu_arr[0].header
hdr['DESCR'] = ('PSF Coefficients', 'File Description')
hdr['NWAVES'] = (npsf, 'Number of wavelengths used in calculation')
copy_keys = [
'EXTNAME', 'OSAMP', 'OVERSAMP', 'DET_SAMP', 'PIXELSCL', 'FOV',
'INSTRUME', 'FILTER', 'PUPIL', 'CORONMSK',
'WAVELEN', 'DIFFLMT', 'APERNAME', 'MODULE', 'CHANNEL', 'PILIN',
'DET_NAME', 'DET_X', 'DET_Y', 'DET_V2', 'DET_V3',
'GRATNG14', 'GRATNG23', 'FLATTYPE', 'CCCSTATE', 'TACQNAME',
'PUPILINT', 'PUPILOPD', 'OPD_FILE', 'OPDSLICE', 'TEL_WFE',
'SI_WFE', 'SIWFETYP', 'SIWFEFPT',
'ROTATION', 'DISTORT', 'SIAF_VER', 'MIR_DIST', 'KERN_AMP', 'KERNFOLD',
'NORMALIZ', 'FFTTYPE', 'AUTHOR', 'DATE', 'VERSION', 'DATAVERS'
]
for key in copy_keys:
try:
hdr[key] = (head_temp[key], head_temp.comments[key])
except (AttributeError, KeyError):
pass
# hdr[key] = ('none', 'No key found')
hdr['WEXTVERS'] = (__version__, "webbpsf_ext version")
# Update keywords
hdr['PUPILOPD'] = (opd_name, 'Original Pupil OPD source')
hdr['OPDSLICE'] = (opd_num, 'OPD slice index')
# Source positioning
offset_r = self.options.get('source_offset_r', 'None')
offset_theta = self.options.get('source_offset_theta', 'None')
# Mask offsetting
coron_shift_x = self.options.get('coron_shift_x', 'None')
coron_shift_y = self.options.get('coron_shift_y', 'None')
bar_offset = self.options.get('bar_offset', 'None')
# Jitter settings
jitter = self.options.get('jitter')
jitter_sigma = self.options.get('jitter_sigma',
#!/usr/bin/env python
# coding: utf-8
import nose
import itertools
import os
import string
import warnings
from distutils.version import LooseVersion
from pandas import Series, DataFrame, MultiIndex
from pandas.compat import range, lmap, lzip
import pandas.util.testing as tm
from pandas.util.testing import slow
import numpy as np
from numpy import random
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.test_graphics import (TestPlotBase, _check_plot_works,
curpath, _ok_for_gaussian_kde)
"""
These tests are for ``DataFrame.hist``, ``DataFrame.boxplot`` and
other miscellaneous plots.
``DataFrame.plot`` and ``Series.plot`` are tested in test_graphics.py
"""
def _skip_if_mpl_14_or_dev_boxplot():
# GH 8382
# Boxplot failures on 1.4 and 1.4.1
# Don't need try / except since that's done at class level
import matplotlib
if str(matplotlib.__version__) >= LooseVersion('1.4'):
raise nose.SkipTest("Matplotlib Regression in 1.4 and current dev.")
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
_check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
self.assertEqual(len(ax.patches), 2)
@slow
def test_hist_layout(self):
df = self.hist_df
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
axes = _check_plot_works(
df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender) # noqa
self.assertEqual(len(self.plt.get_fignums()), 1)
@slow
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, series=self.ts)
_check_plot_works(autocorrelation_plot, series=self.ts.values)
ax = autocorrelation_plot(self.ts, label='Test')
self._check_legend_labels(ax, labels=['Test'])
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, series=self.ts)
_check_plot_works(lag_plot, series=self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, series=self.ts, size=10)
@tm.mplskip
class TestDataFramePlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20)})
from pandas import read_csv
path = os.path.join(curpath(), 'data', 'iris.csv')
self.iris = read_csv(path)
@slow
def test_boxplot_legacy(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot, return_type='dict')
_check_plot_works(df.boxplot, column=[
'one', 'two'], return_type='dict')
_check_plot_works(df.boxplot, column=['one', 'two'], by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting.boxplot, data=df['one'], return_type='dict')
_check_plot_works(df.boxplot, notch=1, return_type='dict')
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
df['Y'] = Series(['A'] * 10)
_check_plot_works(df.boxplot, by='X')
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot('Col1', by='X', ax=ax)
self.assertIs(ax.get_axes(), axes)
fig, ax = self.plt.subplots()
axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')
self.assertIs(ax.get_axes(), axes['A'])
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
axes = df.boxplot(column=['Col1', 'Col2'],
by='X', ax=ax, return_type='axes')
self.assertIs(axes['Col1'].get_figure(), fig)
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type='dict')
lines = list(itertools.chain.from_iterable(d.values()))
self.assertEqual(len(ax.get_lines()), len(lines))
@slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pydata/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with tm.assertRaises(ValueError):
df.boxplot(return_type='NOTATYPE')
with tm.assert_produces_warning(FutureWarning):
result = df.boxplot()
# change to Axes in future
self._check_box_return_type(result, 'dict')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='dict')
self._check_box_return_type(result, 'dict')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='axes')
self._check_box_return_type(result, 'axes')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='both')
self._check_box_return_type(result, 'both')
@slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
self.assertTrue(y_min <= col.min())
self.assertTrue(y_max >= col.max())
df = self.hist_df.copy()
df['age'] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
self.assertEqual(weight_ax._sharey, height_ax)
# Two rows, one partial
p = df.boxplot(['height', 'weight', 'age'], by='category')
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
_check_ax_limits(df['age'], age_ax)
self.assertEqual(weight_ax._sharey, height_ax)
self.assertEqual(age_ax._sharey, height_ax)
self.assertIsNone(dummy_ax._sharey)
@slow
def test_boxplot_empty_column(self):
_skip_if_mpl_14_or_dev_boxplot()
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type='axes')
@slow
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 3))
axes = _check_plot_works(df.hist, grid=False)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertFalse(axes[1, 1].get_visible())
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
axes = _check_plot_works(df.hist, layout=(4, 2))
self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
_check_plot_works(df.hist, figsize=(8, 10))
# check bins argument
_check_plot_works(df.hist, bins=5)
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
self.assertAlmostEqual(rects[-1].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self._check_ax_scales(ax, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with tm.assertRaises(AttributeError):
ser.hist(foo='bar')
@slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
{'layout': (-1, 4), 'expected_size': (1, 4)},
{'layout': (4, -1), 'expected_size': (4, 1)},
{'layout': (-1, 2), 'expected_size': (2, 2)},
{'layout': (2, -1), 'expected_size': (2, 2)}
)
for layout_test in layout_to_expected_size:
axes = df.hist(layout=layout_test['layout'])
expected = layout_test['expected_size']
self._check_axes_shape(axes, axes_num=3, layout=expected)
# layout too small for all 4 plots
with tm.assertRaises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with tm.assertRaises(ValueError):
df.hist(layout=(1,))
with tm.assertRaises(ValueError):
df.hist(layout=(-1, -1))
@slow
def test_scatter_plot_legacy(self):
tm._skip_if_no_scipy()
df = DataFrame(randn(100, 2))
def scat(**kwds):
return plotting.scatter_matrix(df, **kwds)
_check_plot_works(scat)
_check_plot_works(scat, marker='+')
_check_plot_works(scat, vmin=0)
if _ok_for_gaussian_kde('kde'):
_check_plot_works(scat, diagonal='kde')
if _ok_for_gaussian_kde('density'):
_check_plot_works(scat, diagonal='density')
_check_plot_works(scat, diagonal='hist')
_check_plot_works(scat, range_padding=.1)
def scat2(x, y, by=None, ax=None, figsize=None):
return plotting.scatter_plot(df, x, y, by, ax, figsize=None)
_check_plot_works(scat2, x=0, y=1)
grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index)
_check_plot_works(scat2, x=0, y=1, by=grouper)
def test_scatter_matrix_axis(self):
tm._skip_if_no_scipy()
scatter_matrix = plotting.scatter_matrix
with tm.RNGContext(42):
df = DataFrame(randn(100, 3))
# we are plotting multiples on a sub-plot
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(scatter_matrix, filterwarnings='always',
frame=df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
# GH 5662
expected = ['-2', '-1', '0', '1', '2']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(
axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
df[0] = ((df[0] - 2) / 3)
# we are plotting multiples on a sub-plot
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(scatter_matrix, filterwarnings='always',
frame=df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
expected = ['-1.2', '-1.0', '-0.8', '-0.6', '-0.4', '-0.2', '0.0']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(
axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
@slow
def test_andrews_curves(self):
from pandas.tools.plotting import andrews_curves
from matplotlib import cm
df = self.iris
_check_plot_works(andrews_curves, frame=df, class_column='Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=rgba)
self._check_colors(
ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=cnames)
self._check_colors(
ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10])
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
self._check_colors(
ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10])
length = 10
df = DataFrame({"A": random.rand(length),
"B": random.rand(length),
"C": random.rand(length),
"Name": ["A"] * length})
_check_plot_works(andrews_curves, frame=df, class_column='Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=rgba)
self._check_colors(
ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
# Repository: justinnoah/typhon
# Copyright (C) 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.lltypesystem.lltype import scoped_alloc
from rpython.rtyper.lltypesystem.rffi import charpsize2str
from typhon import log, rsodium, ruv
from typhon.atoms import getAtom
from typhon.autohelp import autohelp, method
from typhon.enum import makeEnum
from typhon.errors import userError
from typhon.objects.constants import NullObject
from typhon.objects.data import BytesObject, StrObject, unwrapStr
from typhon.objects.refs import LocalResolver, makePromise
from typhon.objects.root import Object, runnable
from typhon.vats import currentVat, scopedVat
ABORTFLOW_0 = getAtom(u"abortFlow", 0)
FLOWABORTED_1 = getAtom(u"flowAborted", 1)
FLOWSTOPPED_1 = getAtom(u"flowStopped", 1)
RECEIVE_1 = getAtom(u"receive", 1)
RUN_1 = getAtom(u"run", 1)
@autohelp
class FileUnpauser(Object):
"""
A pause on a file fount.
"""
def __init__(self, fount):
self.fount = fount
@method("Void")
def unpause(self):
if self.fount is not None:
self.fount.unpause()
# Let go so that the fount can be GC'd if necessary.
self.fount = None
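# Usage sketch (added comment only, not original code; `fount` is a hypothetical
# FileFount): pauses nest by counting, so reads resume only after every pause
# handed out has been released.
#
#     p1 = fount.pauseFlow()   # fount.pauses == 1
#     p2 = fount.pauseFlow()   # fount.pauses == 2
#     p1.unpause()             # fount.pauses == 1, reads still paused
#     p2.unpause()             # fount.pauses == 0, queueRead() fires again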
def readCB(fs):
# Does *not* invoke user code.
try:
size = intmask(fs.c_result)
with ruv.unstashingFS(fs) as (vat, fount):
assert isinstance(fount, FileFount)
# Done with fs, but don't free it; it belongs to the fount.
if size > 0:
data = charpsize2str(fount.buf.c_base, size)
fount.receive(data)
elif size < 0:
msg = ruv.formatError(size).decode("utf-8")
fount.abort(u"libuv error: %s" % msg)
else:
fount.stop(u"End of file")
except:
print "Exception in readCB"
@autohelp
class FileFount(Object):
"""
A fount for a file.
"""
pauses = 0
pos = 0
def __init__(self, fs, fd, vat):
self.fs = fs
self.fd = fd
self.vat = vat
# XXX read size should be tunable
self.buf = ruv.allocBuf(16384)
# Set this up only once.
ruv.stashFS(self.fs, (self.vat, self))
@method("Any", "Any")
def flowTo(self, drain):
self.drain = drain
rv = drain.call(u"flowingFrom", [self])
self.queueRead()
return rv
@method("Any")
def pauseFlow(self):
return self.pause()
@method("Void")
def stopFlow(self):
self.stop(u"stopFlow() called")
@method("Void")
def abortFlow(self):
self.abort(u"abortFlow() called")
def stop(self, reason):
from typhon.objects.collections.maps import EMPTY_MAP
self.vat.sendOnly(self.drain, FLOWSTOPPED_1, [StrObject(reason)],
EMPTY_MAP)
self.close()
def abort(self, reason):
from typhon.objects.collections.maps import EMPTY_MAP
self.vat.sendOnly(self.drain, FLOWABORTED_1, [StrObject(reason)],
EMPTY_MAP)
self.close()
def close(self):
uv_loop = self.vat.uv_loop
ruv.fsClose(uv_loop, self.fs, self.fd, ruv.fsDiscard)
ruv.freeBuf(self.buf)
self.drain = None
def pause(self):
self.pauses += 1
return FileUnpauser(self)
def unpause(self):
self.pauses -= 1
if not self.pauses:
self.queueRead()
def queueRead(self):
with scoped_alloc(ruv.rffi.CArray(ruv.buf_t), 1) as bufs:
bufs[0].c_base = self.buf.c_base
bufs[0].c_len = self.buf.c_len
ruv.fsRead(self.vat.uv_loop, self.fs, self.fd, bufs, 1, -1,
readCB)
def receive(self, data):
from typhon.objects.collections.maps import EMPTY_MAP
# Advance the file pointer.
self.pos += len(data)
self.vat.sendOnly(self.drain, RECEIVE_1, [BytesObject(data)],
EMPTY_MAP)
self.queueRead()
def writeCB(fs):
try:
with ruv.unstashingFS(fs) as (vat, drain):
assert isinstance(drain, FileDrain)
size = intmask(fs.c_result)
if size > 0:
drain.written(size)
elif size < 0:
msg = ruv.formatError(size).decode("utf-8")
drain.abort(u"libuv error: %s" % msg)
except:
print "Exception in writeCB"
@autohelp
class FileDrain(Object):
"""
A drain for a file.
"""
fount = None
pos = 0
# State machine:
# * READY: Bufs are empty.
# * WRITING: Bufs are partially full. Write is pending.
# * BUSY: Bufs are overfull. Write is pending.
# * CLOSING: Bufs are partially full. Write is pending. New writes cause
# exceptions.
# * CLOSED: Bufs are empty. New writes cause exceptions.
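    # Transition sketch (added comment): receive() moves READY -> WRITING;
    # written() returns to READY once the buffers drain, or stays in WRITING
    # while data remains; flowStopped()/flowAborted() call closing(), which
    # requests the close immediately when READY and otherwise waits in
    # CLOSING until the last pending write completes (queueClose -> CLOSED).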
READY, WRITING, BUSY, CLOSING, CLOSED = makeEnum(u"FileDrain",
u"ready writing busy closing closed".split())
_state = READY
def __init__(self, fs, fd, vat):
self.fs = fs
self.fd = fd
self.vat = vat
self.bufs = []
# Set this up only once.
ruv.stashFS(self.fs, (self.vat, self))
@method("Any", "Any")
def flowingFrom(self, fount):
self.fount = fount
return self
@method("Void", "Any")
def flowStopped(self, unused):
# Prepare to shut down. Switch to CLOSING to stop future data from
# being queued.
self.closing()
@method("Void", "Any")
def flowAborted(self, unused):
# We'll shut down cleanly, but we're going to discard all the work
# that we haven't yet written.
self.bufs = []
self.closing()
@method("Void", "Bytes")
def receive(self, data):
if self._state in (self.READY, self.WRITING, self.BUSY):
self.bufs.append(data)
if self._state is self.READY:
# We're not writing right now, so queue a write.
self.queueWrite()
self._state = self.WRITING
else:
raise userError(u"Can't write to drain in state %s" %
self._state.repr)
def abort(self, reason):
if self.fount is not None:
with scopedVat(self.vat):
from typhon.objects.collections.maps import EMPTY_MAP
self.vat.sendOnly(self.fount, ABORTFLOW_0, [], EMPTY_MAP)
self.closing()
def queueWrite(self):
with ruv.scopedBufs(self.bufs) as bufs:
ruv.fsWrite(self.vat.uv_loop, self.fs, self.fd, bufs,
len(self.bufs), -1, writeCB)
    def closing(self):
        if self._state is self.READY:
            # Optimization: proceed directly to CLOSED if there are no
            # outstanding writes.
            self.queueClose()
        else:
            self._state = self.CLOSING
def queueClose(self):
ruv.fsClose(self.vat.uv_loop, self.fs, self.fd, ruv.fsDiscard)
self._state = self.CLOSED
def written(self, size):
self.pos += size
bufs = []
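        # Drop buffers that were written in full and trim the partially
        # written one; whatever is left still needs to be flushed.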
for buf in self.bufs:
if size >= len(buf):
size -= len(buf)
elif size == 0:
bufs.append(buf)
else:
assert size >= 0
bufs.append(buf[size:])
size = 0
self.bufs = bufs
if self.bufs:
# More bufs remain to write. Queue them.
self.queueWrite()
# If we were CLOSING before, we're still CLOSING now. Otherwise,
# we transition (from READY/WRITING) to WRITING.
if self._state is not self.CLOSING:
self._state = self.WRITING
elif self._state is self.CLOSING:
# Finally, we're out of things to do. Request a close.
self.queueClose()
else:
# We are ready for more work.
self._state = self.READY
def openFountCB(fs):
# Does *not* run user-level code. The scoped vat is only for promise
# resolution.
try:
fd = intmask(fs.c_result)
vat, r = ruv.unstashFS(fs)
assert isinstance(r, LocalResolver)
with scopedVat(vat):
if fd < 0:
msg = ruv.formatError(fd).decode("utf-8")
r.smash(StrObject(u"Couldn't open file fount: %s" % msg))
# Done with fs.
ruv.fsDiscard(fs)
else:
r.resolve(FileFount(fs, fd, vat))
except:
print "Exception in openFountCB"
def openDrainCB(fs):
# As above.
try:
fd = intmask(fs.c_result)
vat, r = ruv.unstashFS(fs)
assert isinstance(r, LocalResolver)
with scopedVat(vat):
if fd < 0:
msg = ruv.formatError(fd).decode("utf-8")
r.smash(StrObject(u"Couldn't open file drain: %s" % msg))
# Done with fs.
ruv.fsDiscard(fs)
else:
r.resolve(FileDrain(fs, fd, vat))
except:
print "Exception in openDrainCB"
class GetContents(Object):
"""
Struct used to manage getContents/0 calls.
Has to be an Object so that it can be unified with LocalResolver.
No, seriously.
"""
# Our position reading from the file.
pos = 0
def __init__(self, vat, fs, fd, resolver):
self.vat = vat
self.fs = fs
self.fd = fd
self.resolver = resolver
self.pieces = []
# XXX read size should be tunable
self.buf = ruv.allocBuf(16384)
# Do our initial stashing.
ruv.stashFS(fs, (vat, self))
def append(self, data):
self.pieces.append(data)
self.pos += len(data)
# Queue another!
ruv.stashFS(self.fs, (self.vat, self))
self.queueRead()
def succeed(self):
# Clean up libuv stuff.
ruv.fsClose(self.vat.uv_loop, self.fs, self.fd, ruv.fsDiscard)
# Finally, resolve.
buf = "".join(self.pieces)
self.resolver.resolve(BytesObject(buf))
def fail(self, reason):
# Clean up libuv stuff.
ruv.fsClose(self.vat.uv_loop, self.fs, self.fd, ruv.fsDiscard)
# And resolve.
self.resolver.smash(StrObject(u"libuv error: %s" % reason))
def queueRead(self):
with scoped_alloc(ruv.rffi.CArray(ruv.buf_t), 1) as bufs:
bufs[0].c_base = self.buf.c_base
bufs[0].c_len = self.buf.c_len
ruv.fsRead(self.vat.uv_loop, self.fs, self.fd, bufs, 1, -1,
getContentsCB)
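# Read-loop sketch (added comment, not original code): openGetContentsCB()
# builds a GetContents and queues the first read; each getContentsCB() call
# appends the chunk and queues the next read, until a zero-length read
# resolves the promise via succeed() or a libuv error smashes it via fail().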
def openGetContentsCB(fs):
try:
fd = intmask(fs.c_result)
vat, r = ruv.unstashFS(fs)
assert isinstance(r, LocalResolver)
with scopedVat(vat):
if fd < 0:
msg = ruv.formatError(fd).decode("utf-8")
r.smash(StrObject(u"Couldn't open file fount: %s" % msg))
# Done with fs.
ruv.fsDiscard(fs)
else:
# Strategy: Read and use the callback to queue additional reads
# until done. This call is known to its caller to be expensive, so
# there's not much point in trying to be clever about things yet.
gc = GetContents(vat, fs, fd, r)
gc.queueRead()
except:
print "Exception in openGetContentsCB"
def getContentsCB(fs):
try:
size = intmask(fs.c_result)
# Don't use with-statements here; instead, each next action in
# GetContents will re-stash if necessary. ~ C.
vat, self = ruv.unstashFS(fs)
assert isinstance(self, GetContents)
if size > 0:
data = charpsize2str(self.buf.c_base, size)
self.append(data)
elif size < 0:
msg = ruv.formatError(size).decode("utf-8")
self.fail(msg)
else:
# End of file! Complete the callback.
self.succeed()
except Exception:
print "Exception in getContentsCB"
def renameCB(fs):
try:
success = intmask(fs.c_result)
vat, r = ruv.unstashFS(fs)
if success < 0:
msg = ruv.formatError(success).decode("utf-8")
r.smash(StrObject(u"Couldn't rename file: %s" % msg))
else:
r.resolve(NullObject)
# Done with fs.
ruv.fsDiscard(fs)
except:
print "Exception in renameCB"
class SetContents(Object):
pos = 0
def __init__(self, vat, data, resolver, src, dest):
self.vat = vat
self.data = data
self.resolver = resolver
        self.src = src
http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerHourPerLiter = CommonUCUMUnitsCode("nmol/h/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerHourPerMilligramOfProtein = CommonUCUMUnitsCode("nmol/h/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerHourPerMilliliter = CommonUCUMUnitsCode("nmol/h/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerLiter = CommonUCUMUnitsCode("nmol/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerLiterPerMillimoleOfCreatinine = CommonUCUMUnitsCode("nmol/L/mmol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerLiterPerSecond = CommonUCUMUnitsCode("nmol/L/s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerLiterOfRedBloodCells = CommonUCUMUnitsCode("nmol/L{RBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMeterPerMilligramOfProtein = CommonUCUMUnitsCode("nmol/m/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMilligram = CommonUCUMUnitsCode("nmol/mg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMilligramPerHour = CommonUCUMUnitsCode("nmol/mg/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMinutePerMilligramOfHemoglobin = CommonUCUMUnitsCode(
"nmol/min/mg{hemoglobin}"
)
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMinutePerMilligramOfProtein = CommonUCUMUnitsCode("nmol/min/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMinutePerMilliliter = CommonUCUMUnitsCode("nmol/min/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMilliliter = CommonUCUMUnitsCode("nmol/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMilliliterPerHour = CommonUCUMUnitsCode("nmol/mL/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMilliliterPerMinute = CommonUCUMUnitsCode("nmol/mL/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMillimole = CommonUCUMUnitsCode("nmol/mmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Nanomole_MillimoleCre = CommonUCUMUnitsCode("nmol/mmol{Cre}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMillimoleOfCreatinine = CommonUCUMUnitsCode("nmol/mmol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMole = CommonUCUMUnitsCode("nmol/mol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerNanomole = CommonUCUMUnitsCode("nmol/nmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerSecond = CommonUCUMUnitsCode("nmol/s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerSecondPerLiter = CommonUCUMUnitsCode("nmol/s/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
NanomolePerMicromoleOfCreatinine = CommonUCUMUnitsCode("nmol/umol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Nanosecond = CommonUCUMUnitsCode("ns")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Ohm = CommonUCUMUnitsCode("Ohm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
OsmolePerKilogram = CommonUCUMUnitsCode("osm/kg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
OsmolePerLiter = CommonUCUMUnitsCode("osm/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Pascal = CommonUCUMUnitsCode("Pa")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picogram = CommonUCUMUnitsCode("pg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicogramPerDeciliter = CommonUCUMUnitsCode("pg/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicogramPerLiter = CommonUCUMUnitsCode("pg/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicogramPerMilligram = CommonUCUMUnitsCode("pg/mg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicogramPerMilliliter = CommonUCUMUnitsCode("pg/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicogramPerMillimeter = CommonUCUMUnitsCode("pg/mm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picokatal = CommonUCUMUnitsCode("pkat")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picoliter = CommonUCUMUnitsCode("pL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picometer = CommonUCUMUnitsCode("pm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picomole = CommonUCUMUnitsCode("pmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerDay = CommonUCUMUnitsCode("pmol/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerDeciliter = CommonUCUMUnitsCode("pmol/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerGram = CommonUCUMUnitsCode("pmol/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerHourPerMilligramOfProtein = CommonUCUMUnitsCode("pmol/h/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerHourPerMilliliter = CommonUCUMUnitsCode("pmol/h/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerLiter = CommonUCUMUnitsCode("pmol/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMilligramOfProtein = CommonUCUMUnitsCode("pmol/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMinute = CommonUCUMUnitsCode("pmol/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMinutePerMilligramOfProtein = CommonUCUMUnitsCode("pmol/min/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMilliliter = CommonUCUMUnitsCode("pmol/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMillimole = CommonUCUMUnitsCode("pmol/mmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMillimoleOfCreatinine = CommonUCUMUnitsCode("pmol/mmol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMole = CommonUCUMUnitsCode("pmol/mol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMicromole = CommonUCUMUnitsCode("pmol/umol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
PicomolePerMicromoleOfCreatinine = CommonUCUMUnitsCode("pmol/umol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picosecond = CommonUCUMUnitsCode("ps")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Picotesla = CommonUCUMUnitsCode("pT")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Second = CommonUCUMUnitsCode("s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Stokes = CommonUCUMUnitsCode("St")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Tonne = CommonUCUMUnitsCode("t")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnit = CommonUCUMUnitsCode("U")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer1Hour = CommonUCUMUnitsCode("U/(1.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer12Hour = CommonUCUMUnitsCode("U/(12.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer18Hour = CommonUCUMUnitsCode("U/(18.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer2Hour = CommonUCUMUnitsCode("U/(2.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer24Hour = CommonUCUMUnitsCode("U/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer10BillionCells = CommonUCUMUnitsCode("U/10*10{cells}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerTrillion = CommonUCUMUnitsCode("U/10*12")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMillion = CommonUCUMUnitsCode("U/10*6")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerBillion = CommonUCUMUnitsCode("U/10*9")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerDay = CommonUCUMUnitsCode("U/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerDeciliter = CommonUCUMUnitsCode("U/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerGram = CommonUCUMUnitsCode("U/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Unit_GramCre = CommonUCUMUnitsCode("U/g{Cre}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerGramOfHemoglobin = CommonUCUMUnitsCode("U/g{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerGramOfHemoglobin = CommonUCUMUnitsCode("U/g{hemoglobin}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
UnitsPerGramHemoglobin = CommonUCUMUnitsCode("U/g{Hgb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerHour = CommonUCUMUnitsCode("U/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Unit_Kilogram_Hour = CommonUCUMUnitsCode("U/kg/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerKilogramOfHemoglobin = CommonUCUMUnitsCode("U/kg{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerKilogramOfHemoglobin = CommonUCUMUnitsCode("U/kg{hemoglobin}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerLiter = CommonUCUMUnitsCode("U/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMinute = CommonUCUMUnitsCode("U/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMilliliter = CommonUCUMUnitsCode("U/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Unit_MilliliterRBC = CommonUCUMUnitsCode("U/mL{RBC}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMilliliterOfRedBloodCells = CommonUCUMUnitsCode("U/mL{RBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMillimoleOfCreatinine = CommonUCUMUnitsCode("U/mmol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMole = CommonUCUMUnitsCode("U/mol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerSecond = CommonUCUMUnitsCode("U/s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerMicromole = CommonUCUMUnitsCode("U/umol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroInternationalUnit = CommonUCUMUnitsCode("u[IU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrointernationalUnitPerLiter = CommonUCUMUnitsCode("u[IU]/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroInternationalUnitPerMilliliter = CommonUCUMUnitsCode("u[IU]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microequivalents = CommonUCUMUnitsCode("ueq")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroequivalentPerLiter = CommonUCUMUnitsCode("ueq/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroequivalentPerMilliliter = CommonUCUMUnitsCode("ueq/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram = CommonUCUMUnitsCode("ug")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPer24Hour = CommonUCUMUnitsCode("ug/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPer8Hour = CommonUCUMUnitsCode("ug/(8.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_Kilogram_Day = CommonUCUMUnitsCode("ug/(kg.d)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_Kilogram_Hour = CommonUCUMUnitsCode("ug/(kg.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_Specimen = CommonUCUMUnitsCode("ug/{Specimen}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroGramsPerTotalVolume = CommonUCUMUnitsCode("ug/{TotalVolume}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerDay = CommonUCUMUnitsCode("ug/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerDeciliter = CommonUCUMUnitsCode("ug/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_DeciliterRbc = CommonUCUMUnitsCode("ug/dL{rbc}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGram = CommonUCUMUnitsCode("ug/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_GramCre = CommonUCUMUnitsCode("ug/g{Cre}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfCreatinine = CommonUCUMUnitsCode("ug/g{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_GramDryWeight = CommonUCUMUnitsCode("ug/g{DryWeight}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_GramHgb = CommonUCUMUnitsCode("ug/g{Hgb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_GramTissue = CommonUCUMUnitsCode("ug/g{Tissue}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerHour = CommonUCUMUnitsCode("ug/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerKilogram = CommonUCUMUnitsCode("ug/kg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerKilogramPer8Hour = CommonUCUMUnitsCode("ug/kg/(8.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerKilogramPerDay = CommonUCUMUnitsCode("ug/kg/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerKilogramPerHour = CommonUCUMUnitsCode("ug/kg/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerKilogramPerMinute = CommonUCUMUnitsCode("ug/kg/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerLiter = CommonUCUMUnitsCode("ug/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerLiterPer24Hour = CommonUCUMUnitsCode("ug/L/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_LiterDDU = CommonUCUMUnitsCode("ug/L{DDU}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerSquareMeter = CommonUCUMUnitsCode("ug/m2")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerMilligram = CommonUCUMUnitsCode("ug/mg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_MilligramCre = CommonUCUMUnitsCode("ug/mg{Cre}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerMilligramOfCreatinine = CommonUCUMUnitsCode("ug/mg{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerMinute = CommonUCUMUnitsCode("ug/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerMilliliter = CommonUCUMUnitsCode("ug/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microgram_MilliliterFEU = CommonUCUMUnitsCode("ug/mL{FEU}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerMillimole = CommonUCUMUnitsCode("ug/mmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerNanogram = CommonUCUMUnitsCode("ug/ng")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramT4_Deciliter = CommonUCUMUnitsCode("ug{T4}/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microkatal = CommonUCUMUnitsCode("ukat")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microliter = CommonUCUMUnitsCode("uL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroliterPer2Hour = CommonUCUMUnitsCode("uL/(2.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroliterPerHour = CommonUCUMUnitsCode("uL/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micrometer = CommonUCUMUnitsCode("um")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrometerPerSecond = CommonUCUMUnitsCode("um/s")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole = CommonUCUMUnitsCode("umol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePer24Hour = CommonUCUMUnitsCode("umol/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerDay = CommonUCUMUnitsCode("umol/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerDeciliter = CommonUCUMUnitsCode("umol/dL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerGram = CommonUCUMUnitsCode("umol/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_GramCre = CommonUCUMUnitsCode("umol/g{Cre}")
"""
    From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
    """
index=idx,
        columns=pd.MultiIndex.from_arrays([[""], [r"$Q_{95\%}$"]]),
    )
    col = pd.MultiIndex.from_product(
        [["training", "test"], [r"mean $\pm$ std", "mean", "std", "RRMSE", "RRMSE(b)"]]
)
error_table = pd.concat(
[error_table, pd.DataFrame("", index=idx, columns=col)], axis=1
)
error_table.loc[:, ("training", "mean")] = (
(y_true_train - y_pred_train).mean().to_numpy()
)
error_table.loc[:, ("training", "std")] = (
(y_true_train - y_pred_train).std().to_numpy()
)
error_table.loc[:, ("test", "mean")] = (y_true_test - y_pred_test).mean().to_numpy()
error_table.loc[:, ("test", "std")] = (y_true_test - y_pred_test).std().to_numpy()
error_metric = TSCMetric(metric="rmse", mode="feature")
train_error_edmd = (
error_metric(
y_true=y_true_train,
y_pred=y_pred_train,
)
/ scale
* 100
).to_numpy()
train_error_naive = (
error_metric(
X_windows_train.loc[X_reconstruct_train_naive.index, :],
X_reconstruct_train_naive,
)
/ scale
* 100
).to_numpy()
test_error_edmd = (
error_metric(y_true=y_true_test, y_pred=y_pred_test).to_numpy() / scale * 100
).to_numpy()
test_error_naive = (
error_metric(
X_windows_test.loc[X_reconstruct_test.index, :], X_reconstruct_test_naive
)
/ scale
* 100
).to_numpy()
from numpy.core.defchararray import add as npaddstr
error_table.loc[:, ("training", "RRMSE")] = np.round(train_error_edmd, 2).astype(
str
)
error_table.loc[:, ("test", "RRMSE")] = np.round(test_error_edmd, 2).astype(str)
error_table.loc[:, ("training", "RRMSE(b)")] = np.round(
train_error_naive, 2
).astype(str)
error_table.loc[:, ("test", "RRMSE(b)")] = np.round(test_error_naive, 2).astype(str)
avr = lambda vals: np.round(np.average(vals), 2).astype(str)
weight_avr = lambda vals: np.average(vals, weights=scale)
error_table.loc["agg.", :] = [
"", # scale
"", # mean pm std
weight_avr(error_table.loc[:, ("training", "mean")]),
np.sqrt(
weight_avr(np.square(error_table.loc[:, ("training", "std")].to_numpy()))
),
avr(error_table.loc[:, ("training", "RRMSE")].astype(float)),
avr(error_table.loc[:, ("training", "RRMSE(b)")].astype(float)),
"", # mean pm std
np.round(weight_avr(error_table.loc[:, ("test", "mean")]), 2),
np.sqrt(weight_avr(np.square(error_table.loc[:, ("test", "std")].to_numpy()))),
avr(error_table.loc[:, ("test", "RRMSE")].astype(float)),
avr(error_table.loc[:, ("test", "RRMSE(b)")].astype(float)),
]
def mean_pm_std(_mean, _std):
return [
f"{np.round(i).astype(int)} $\pm$ {np.round(j).astype(int)}"
for i, j in zip(
_mean,
_std,
)
]
error_table.loc[:, ("training", "mean $\pm$ std")] = mean_pm_std(
error_table.loc[:, ("training", "mean")].to_numpy(),
error_table.loc[:, ("training", "std")].to_numpy(),
)
error_table.loc[:, ("test", "mean $\pm$ std")] = mean_pm_std(
error_table.loc[:, ("test", "mean")].to_numpy(),
error_table.loc[:, ("test", "std")].to_numpy(),
)
error_table = error_table.drop(error_table.columns[[2, 3, 7, 8]], axis=1)
    error_table.loc[:, error_table.columns[[2, 3, 5, 6]]] = npaddstr(
        error_table.loc[:, error_table.columns[[2, 3, 5, 6]]].to_numpy().astype(str),
        r" \%",
    )
error_table.columns = error_table.columns.set_levels(
[
"",
f"test ($C_{{\\text{{test}}}} = {X_reconstruct_test.n_timeseries}$)",
f"training ($C_{{\\text{{train}}}} = {X_reconstruct_train.n_timeseries}$)",
],
level=0,
)
error_table.index.name = "ID ($s$)"
error_table = error_table.reset_index(col_level=1)
return error_table
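# Illustrative sketch (not part of the original module; names and shapes are
# assumed): the "RRMSE" columns above are root-mean-square errors expressed as
# a percentage of a per-feature scale (the 95% quantile from scale_sensors below).
def _example_rrmse(y_true, y_pred, scale):
    rmse = np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2, axis=0))
    return rmse / np.asarray(scale) * 100.0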
def plot_paper_week_timeseries(X_windows_test, X_reconstruct_test):
ic_time = pd.DatetimeIndex(
X_windows_test.initial_states().index.get_level_values("time")
)
mondays_idx = np.where(ic_time.dayofweek == 0)[0] # 0 = Monday, 6 = Sunday
mondays_idx = mondays_idx[mondays_idx + 14 < len(ic_time)]
timedelta = ic_time[mondays_idx + 14] - ic_time[mondays_idx]
X_windows_test = X_windows_test.loc[X_reconstruct_test.index, :]
sensor_selection = [f"sensor_{s}_counts" for s in [2, 9, 28, 31]]
fortnights_ids = [113] # 215
for plot_id in fortnights_ids:
plot_data_test = X_windows_test.loc[plot_id : plot_id + 6, :]
plot_data_reconstruct = X_reconstruct_test.loc[plot_id : plot_id + 6, :]
f, ax = plt.subplots(
nrows=len(sensor_selection),
sharex=True,
figsize=(DOUBLE_COLUMN, DOUBLE_COLUMN * 0.55),
)
f.subplots_adjust(bottom=0.136, top=0.893)
for i, sensor in enumerate(sensor_selection):
sensor_data_test = plot_data_test.loc[:, sensor]
sensor_data_reconstruct = plot_data_reconstruct.loc[:, sensor]
is_last = i == len(sensor_selection) - 1
ics = sensor_data_reconstruct.initial_states()
ax[i].plot(
ics.index.get_level_values("time"),
ics.to_numpy().ravel(),
"o",
markersize=5,
color="mediumblue",
label="initial condition" if is_last else None,
)
ax[i].plot(
sensor_data_test.index.get_level_values("time"),
sensor_data_test.to_numpy().ravel(),
color="black",
label="true values" if is_last else None,
)
ax[i].plot(
sensor_data_reconstruct.index.get_level_values("time"),
sensor_data_reconstruct.to_numpy(),
color="red",
alpha=0.7,
label="predicted values" if is_last else None,
)
ax[i].set_xticks(
np.arange(
np.datetime64("2019-02-11"),
np.datetime64("2019-02-19"),
np.timedelta64(1, "D"),
)
)
ax[i].set_xticklabels([f"{i}.02." for i in range(11, 19)])
ax[i].set_xlim([np.datetime64("2019-02-11"), np.datetime64("2019-02-18")])
max_val = np.max(sensor_data_test.to_numpy().ravel())
ytick = np.round(max_val, decimals=-2)
ytick = int(ytick)
ax[i].set_yticks([max_val // 2, ytick])
ax[i].set_yticklabels(["", ""])
ax[i].set_ylim([0, ytick])
ax_right = ax[i].twinx()
ax_right.set_ylabel(
sensor.replace("sensor_", "").replace("_counts", ""),
labelpad=10,
rotation=0,
)
ax_right.set_yticks([])
ax[i].set_ylabel(ytick, labelpad=10, rotation=0)
ax[i].grid()
f.align_ylabels()
if is_last:
f.legend(loc="upper center", ncol=3)
ax = f.add_subplot(111, frame_on=False)
ax1 = ax.twinx()
ax.tick_params(labelcolor="none", bottom=False, left=False, right=False)
ax.set_xlabel("time (year 2019)")
ax.set_ylabel("pedestrian count", labelpad=10)
ax1.tick_params(labelcolor="none", bottom=False, left=False, right=False)
ax1.set_ylabel("sensor ID", labelpad=5, rotation=-90)
def scale_sensors(X_windows):
quantile95 = pd.DataFrame(X_windows).quantile(q=0.95)
return quantile95
def plot_paper_sensor_profile(
scale,
X_windows_test,
X_reconstruct_test,
):
sensor_columns = X_reconstruct_test.columns[
X_reconstruct_test.columns.str.startswith("sensor_")
]
X_reconstruct_test = X_reconstruct_test.loc[:, sensor_columns]
X_reconstruct_test = X_reconstruct_test.drop(
X_reconstruct_test.groupby("ID").head(1).index, axis=0
)
X_windows_test = X_windows_test.drop(
X_windows_test.groupby("ID").head(1).index, axis=0
)
X_win_scaled = X_windows_test.loc[X_reconstruct_test.index, :] / scale
X_reconstruct_scaled = X_reconstruct_test / scale
sensor_columns = X_windows_test.columns[
X_windows_test.columns.str.startswith("sensor_")
]
f, ax = plt.subplots(
nrows=3,
ncols=len(sensor_columns) + 3,
figsize=(DOUBLE_COLUMN, DOUBLE_COLUMN * 0.5),
gridspec_kw={
"width_ratios": np.append(np.ones(len(sensor_columns) + 1), [0.3, 0.2])
},
)
gs = ax[0, -1].get_gridspec()
# from https://matplotlib.org/3.1.1/gallery/subplots_axes_and_figures/gridspec_and_subplots.html
# remove the underlying axes
for _ax in ax[0:2, -1].ravel():
_ax.remove()
axcbar_vals = f.add_subplot(gs[0:2, -1])
# f.suptitle(column)
f.subplots_adjust(
hspace=0.14, wspace=0.02, bottom=0.138, top=0.921, left=0.088, right=0.943
)
# left=0.048, bottom=0.1, right=0.9, # top=0.974,
ax[0][-2].remove()
ax[1][-2].remove()
ax[2][-2].remove()
# ax[1][-1].remove()
cmap = "GnBu"
for i, column in enumerate(sensor_columns):
is_first = i == 0
is_last = i == len(sensor_columns) - 1
true_values = X_win_scaled.loc[:, column]
pred_values = X_reconstruct_scaled.loc[:, column]
true_values = true_values.to_numpy().reshape(
[true_values.n_timeseries, true_values.n_timesteps]
)
pred_values = pred_values.to_numpy().reshape(
[pred_values.n_timeseries, pred_values.n_timesteps]
)
vmin, vmax = 0, 1
vals = ax[0][i].imshow(
true_values, vmin=vmin, vmax=vmax, aspect="auto", cmap=cmap
)
ax[0][i].set_title(
column.replace("sensor_", "").replace("_counts", ""),
# + f"\n({int(mean_max_val_per_day[i]):d})"
)
ax[1][i].imshow(pred_values, vmin=vmin, vmax=vmax, aspect="auto", cmap=cmap)
vmin, vmax = -0.5, 0.5
ax[2][i].imshow(
pred_values - true_values, aspect="auto", vmin=vmin, vmax=vmax, cmap="bwr"
)
# ax[2][i].set_xlabel(
# f"{int(mean_absolute_error(X_windows_test.loc[:, column], X_reconstruct_test.loc[:, column]))}"
# )
if is_first:
ax[0][0].set_xticks([])
ax[1][0].set_xticks([])
ax[2][0].set_xticks([0, 23])
ax[2][0].set_xticklabels([1, 24])
ax[0][0].set_yticks([0, 100, 200, 300])
ax[1][0].set_yticks([0, 100, 200, 300])
ax[2][0].set_yticks([0, 100, 200, 300])
else:
ax[0][i].set_xticks([])
ax[1][i].set_xticks([])
ax[2][i].set_xticks([23])
ax[2][i].set_xticklabels([24])
ax[0][i].set_yticks([])
ax[1][i].set_yticks([])
ax[2][i].set_yticks([])
if is_last:
cbar = plt.colorbar(vals, axcbar_vals, ticks=[0, 0.5, 1], extend="max")
cbar.ax.set_yticklabels(["0", "0.5", "1"])
mean_true_values = TSCDataFrame(
np.average(X_win_scaled.values, axis=1),
index=X_win_scaled.index,
columns=["average"],
)
mean_true_values = mean_true_values.to_numpy().reshape(
[mean_true_values.n_timeseries, mean_true_values.n_timesteps]
)
mean_pred_values = TSCDataFrame(
np.average(
X_reconstruct_scaled.values, axis=1
), # , weights=mean_max_val_per_day
index=X_win_scaled.index,
columns=["average"],
)
mean_pred_values = mean_pred_values.to_numpy().reshape(
[mean_pred_values.n_timeseries, mean_pred_values.n_timesteps]
)
c = -3
ax[0][c].imshow(mean_true_values, vmin=0, vmax=1, aspect="auto", cmap=cmap)
ax[1][c].imshow(mean_pred_values, vmin=0, vmax=1, aspect="auto", cmap=cmap)
err = ax[2][c].imshow(
mean_pred_values - mean_true_values,
vmin=-0.5,
vmax=0.5,
cmap="bwr",
aspect="auto",
)
cbar = plt.colorbar(err, ax[2][-1], ticks=[-0.5, 0, 0.5], extend="both")
cbar.ax.set_yticklabels([-0.5, 0, 0.5])
ax[0][c].set_title("agg.")
ax[0][c].set_xticks([])
ax[1][c].set_xticks([])
ax[2][c].set_xticks([23])
ax[2][c].set_xticklabels([24])
ax[0][c].set_yticks([])
ax[1][c].set_yticks([])
ax[2][c].set_yticks([])
ax[0][c].set_ylabel("true values", labelpad=10, rotation=-90)
ax[1][c].set_ylabel("pred. values", labelpad=10, rotation=-90)
ax[2][c].set_ylabel("difference", labelpad=10, rotation=-90)
ax[0][c].yaxis.set_label_position("right")
ax[1][c].yaxis.set_label_position("right")
ax[2][c].yaxis.set_label_position("right")
f.add_subplot(111, frame_on=False)
plt.tick_params(labelcolor="none", bottom=False, left=False)
plt.xlabel("time [hour]")
plt.ylabel("initial condition")
def plot_timeseries_dmap(
sensor_str, dmap_idx, X_windows_train, X_windows_test, X_latent_train, X_latent_test
):
f, ax = plt.subplots(
nrows=2, sharex=True, figsize=(DOUBLE_COLUMN, DOUBLE_COLUMN * 0.55)
)
X_windows_train.loc[:, sensor_str].plot(
ax=ax[0], legend=False, ylabel="", c="black"
)
X_windows_test.loc[:, sensor_str].plot(
ax=ax[0], ylabel="", legend=False, c="black"
)
    # -1 because the paper uses 1-based indexing while the code is 0-based
X_latent_train.loc[:, f"dmap{dmap_idx-1}"].plot(ax=ax[1], legend=False, c="black")
X_latent_test.loc[:, f"dmap{dmap_idx-1}"].plot(ax=ax[1], legend=False, c="black")
ax[0].set_ylabel(sensor_str.replace("_", ""))
ax[1].set_ylabel(fr"$\varphi_{{{dmap_idx}}}$")
def plot_paper_dmap_selection(
edmd,
selection,
X_latent_test,
):
dmap_eigenvalues = edmd.named_steps["laplace"].eigenvalues_
print(f"Smallest DMAP eigenvalue = {dmap_eigenvalues[-1]}")
sensor_mask = X_latent_test.columns.str.startswith("sensor_")
X_latent_test = X_latent_test.loc[:, ~sensor_mask]
plot_data = X_latent_test.iloc[:, selection]
ncols = plot_data.shape[1] // 2
f, ax = plt.subplots(
nrows=2, ncols=ncols, figsize=(DOUBLE_COLUMN * 0.8, ONEHALF_COLUMN * 0.8)
)
f.subplots_adjust(left=0.148, bottom=0.155, right=0.978, top=0.921, hspace=0.248)
vmin = None
vmax = None
for i, col in enumerate(plot_data.columns):
values = plot_data.loc[:, [col]].to_numpy()
values = values.reshape(plot_data.n_timeseries, plot_data.n_timesteps)
values = values[:100, :]
_ax = ax[i // ncols][np.mod(i, ncols)]
_ax.imshow(values, vmin=vmin, vmax=vmax, aspect="auto")
_ax.set_title(i + 1)
is_first_in_row = np.mod(i, ncols) == 0
is_first_row = i // ncols == 0
dmap_idx = int(col.replace("dmap", ""))
_ax.set_title(
rf"$\varphi_{{{dmap_idx+1}}}$",
verticalalignment="center",
y=1.05,
)
if is_first_in_row and is_first_row:
ax[0][0].set_yticks([0, 25, 50, 75, 100])
ax[1][0].set_yticks([0, 25, 50, 75, 100])
f.add_subplot(111, frame_on=False)
plt.tick_params(labelcolor="none", bottom=False, left=False)
plt.xlabel("time [hour]")
plt.ylabel("initial condition")
if is_first_in_row:
if is_first_row:
_ax.set_xticks([])
else:
_ax.set_xticks([0, 24])
_ax.set_yticks([0, 25, 50, 75, 100])
else:
if is_first_row:
_ax.set_xticks([])
else:
_ax.set_xticks([0, 24])
_ax.set_yticks([])
_ax.set_yticks([])
def plot_paper_dmap_3d(
X_latent_train,
X_latent_test,
X_latent_interp_test,
):
dmap_selection = np.array([1, 2, 9]) # in paper 1-based indexing, i.e. (2,3,10)
sensor_mask = X_latent_train.columns.str.startswith("sensor_")
X_latent_test = X_latent_test.loc[:, ~sensor_mask]
plot_data_interp = X_latent_interp_test.iloc[:, dmap_selection]
plot_data_interp = plot_data_interp.drop(plot_data_interp.initial_states(10).index)
plot_data = X_latent_test.iloc[:, dmap_selection]
time_idx_interp = pd.DatetimeIndex(plot_data_interp.index.get_level_values("time"))
from matplotlib.colors import ListedColormap, Normalize
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from pylab import cm
vmax = 1440
new_cmap = cm.get_cmap("twilight", vmax)
color_points = lambda tidx: 60 * tidx.hour + tidx.minute
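    # color_points maps each timestamp to minutes since midnight (0-1439),
    # one value per minute of the day, matching the 1440-level colormap above.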
f = plt.figure(figsize=(DOUBLE_COLUMN, DOUBLE_COLUMN * 0.45))
f.subplots_adjust(left=0.033, bottom=0.23, top=1, wspace=0.4)
ax = f.add_subplot(121, projection="3d")
scatter_point_cloud = ax.scatter(
plot_data.iloc[:, 0],
plot_data.iloc[:, 1],
plot_data.iloc[:, 2],
c=color_points(plot_data.index.get_level_values("time")),
s=0.5,
cmap=new_cmap,
)
lab = lambda n: rf"$\varphi_{{{n+1}}}$"
ax.set_xlabel(lab(dmap_selection[0]))
ax.set_ylabel(lab(dmap_selection[1]))
ax.set_zlabel(lab(dmap_selection[2]))
ax_first = ax
# Small central plot:
_plot_data = pd.DataFrame(plot_data.copy())
_plot_data.index = pd.MultiIndex.from_arrays(
[
plot_data.index.get_level_values("ID"),
plot_data.index.get_level_values("time").strftime("%w:%a-%H"),
]
)
mean_phi = _plot_data.groupby("time").mean()
ax = f.add_axes([0.32, 0.15, 0.3, 0.3], projection="3d")
ax.plot(
mean_phi.loc[:, "dmap1"].to_numpy().ravel(),
mean_phi.loc[:, "dmap2"].to_numpy().ravel(),
mean_phi.loc[:, "dmap9"].to_numpy().ravel(),
c="black",
linewidth=1,
)
for weekday in ["Wed", "Sun"]:
if weekday == "Wed":
color = "orange"
ax.scatter(
mean_phi.loc["3:Wed-20", "dmap1"],
mean_phi.loc["3:Wed-20", "dmap2"],
mean_phi.loc["3:Wed-20", "dmap9"],
s=15,
c="orange",
)
elif weekday == "Sun":
color = "blue"
ax.scatter(
mean_phi.loc["0:Sun-08", "dmap1"],
mean_phi.loc["0:Sun-08", "dmap2"],
mean_phi.loc["0:Sun-08", "dmap9"],
s=15,
c="blue",
)
bool_day = mean_phi.index.str.contains(weekday)
ax.plot(
mean_phi.loc[bool_day, "dmap1"].to_numpy().ravel(),
mean_phi.loc[bool_day, "dmap2"].to_numpy().ravel(),
mean_phi.loc[bool_day, "dmap9"].to_numpy().ravel(),
c=color,
linewidth=1,
)
ax.view_init(-ax.elev, ax.azim)
ax.set_xticks([-0.1, 0, 0.1])
ax.set_yticks([-0.1, 0, 0.1])
ax.set_zticks([-0.1, 0, 0.1])
ax.set_xlim(ax_first.get_xlim())
ax.set_ylim(ax_first.get_xlim())
ax.set_zlim(ax_first.get_xlim())
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
cax = f.add_axes([0.2, 0.1, 0.6, 0.03])
cbar = plt.colorbar(
scatter_point_cloud,
cax=cax,
orientation="horizontal",
    )
from collections import namedtuple
import datetime
import io
import matplotlib
matplotlib.use('Agg') # noqa: E402
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.stats
import time
from tensorboardX import SummaryWriter
import pyro
import pyro.distributions as dist
import pyro.infer
import pyro.optim
from pyro import poutine
from pyro.infer import (
config_enumerate,
Trace_ELBO, TraceGraph_ELBO,
SVI
)
from pyro.contrib.autoguide import (
AutoDelta, AutoDiagonalNormal,
AutoMultivariateNormal, AutoGuideList
)
import torch
import torch.distributions.constraints as constraints
import scene_generation.data.planar_scene_arrangement_utils as psa_utils
import scene_generation.differentiable_nlp as diff_nlp
class DataWrapperForObs:
# Convenience wrapper on data:
# If data's batch dimension is length-0,
# we must be running the model in generative mode,
# so any slicing returns None.
# Otherwise, pass through slicing to the real data.
def __init__(self, data):
self.data = data
def __getitem__(self, key):
if self.data.shape[0] > 0:
return self.data[key]
else:
return None
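# Illustrative sketch (not part of the original file; shapes are assumed):
# an empty batch dimension switches every observation site to "unobserved",
# i.e. generative mode, while a populated batch passes slicing through.
def _example_data_wrapper_usage():
    empty = DataWrapperForObs(torch.empty(0, 3))
    assert empty[0] is None          # generative mode: nothing observed
    full = DataWrapperForObs(torch.rand(8, 3))
    assert full[2] is not None       # conditioning: slicing hits the data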
def expand_partial_pose_to_full_pose(this_pose):
full_poses = torch.zeros(this_pose.shape[0], 6,
requires_grad=False,
dtype=this_pose.dtype)
full_poses[:, 0] = this_pose[:, 0]
full_poses[:, 1] = this_pose[:, 1]
full_poses[:, 5] = this_pose[:, 2]
return full_poses
def collapse_full_pose_to_partial_pose(pose):
partial_pose = torch.zeros(pose.shape[0], 3, requires_grad=False,
dtype=pose.dtype)
partial_pose[:, 0] = pose[:, 0]
partial_pose[:, 1] = pose[:, 1]
partial_pose[:, 2] = pose[:, 5]
return partial_pose
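# Illustrative sketch (not part of the original file): the two helpers above
# are inverses on the (x, y, yaw) coordinates.
def _example_pose_roundtrip():
    partial = torch.tensor([[0.1, 0.2, 0.3]])          # (x, y, yaw)
    full = expand_partial_pose_to_full_pose(partial)   # -> (x, y, 0, 0, 0, yaw)
    assert torch.allclose(collapse_full_pose_to_partial_pose(full), partial)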
VectorizedEnvironments = namedtuple(
"VectorizedEnvironments",
["idents", "poses", "present", "n_samples"], verbose=False)
class ObjectWorldPriorDescription:
''' Prior distribution container for each object class over the ground '''
def __init__(self, class_name,
prior_means_by_class,
prior_vars_by_class):
# TODO(gizatt) Switch this to Normal-inverse-Wishart_distribution
# if/when that becomes supported by Pyro.
self.mean = pyro.sample(
'%s_mean' % class_name,
dist.Normal(prior_means_by_class[class_name]["mean"],
prior_means_by_class[class_name]["var"]).to_event(1))
self.var = pyro.sample(
'%s_var' % class_name,
dist.LogNormal(
prior_vars_by_class[class_name]["mean"],
prior_vars_by_class[class_name]["var"]).to_event(1))
self.dist = dist.Normal(self.mean, self.var).to_event(1)
class MultiObjectMultiClassModel():
def __init__(self, use_projection=True, noisy_projection=False,
use_amortization=True,
max_num_objects=10, min_num_objects=0):
self.use_projection = use_projection
self.noisy_projection = noisy_projection
self.use_amortization = use_amortization
self.max_num_objects = max_num_objects
self.min_num_objects = min_num_objects
self._prepareObjectClasses()
self._prepareInferenceModule()
self.rbts_cache = {}
def _prepareObjectClasses(self):
self.object_classes = ["small_box"]
self.object_class_to_index = {}
for i, obj in enumerate(self.object_classes):
self.object_class_to_index[obj] = i
self.n_object_classes = len(self.object_classes)
def _prepareInferenceModule(self):
H = 10
self.inference_modules = {}
for class_name in self.object_classes:
self.inference_modules[class_name] = torch.nn.Sequential(
torch.nn.Linear(3, H),
torch.nn.ReLU(),
torch.nn.Linear(H, 3),
)
def vectorizeEnvironments(self, envs):
# Vectorization into a
# poses (order x y theta) and
# idents (ci) per object
n_environments = len(envs)
poses = torch.Tensor(n_environments, self.max_num_objects*3)
poses[:, :] = 0.
idents = torch.LongTensor(n_environments, self.max_num_objects)
idents[:, :] = 0
present = torch.Tensor(n_environments, self.max_num_objects)
present[:, :] = 0
n_samples = torch.Tensor(n_environments)
n_samples[:] = 0
for i, env in enumerate(envs):
n_samples[i] = env["n_objects"]
for k in range(env["n_objects"]):
obj = env["obj_%04d" % k]
idents[i, k] = self.object_class_to_index[obj["class"]]
poses[i, (k*3):(k*3+3)] = torch.Tensor(obj["pose"])
present[i, k] = 1
return VectorizedEnvironments(
idents=idents, poses=poses,
present=present, n_samples=n_samples)
def devectorizeEnvironments(self, data):
if not isinstance(data, VectorizedEnvironments):
raise ValueError("Expected VectorizedEnvironments input")
envs = []
for i in range(data.idents.shape[0]):
env = {}
n_objects = 0
for k in range(data.n_samples[i]):
obj = {
"pose": data.poses[i, (k*3):(k*3+3)].cpu().detach().numpy(),
"class": self.object_classes[data.idents[i, k].cpu().detach().item()]
}
env["obj_%04d" % k] = obj
env["n_objects"] = data.n_samples[i]
envs.append(env)
return envs
def _buildRbtFromGeneratedRowAndNewObject(
self, generated_data, row_i, iter_i, ci):
# Build a descriptor string to check into the cache
# "<obj 0 class>_<obj 1 class>_<...>
present = generated_data.present[row_i, 0:iter_i+1].cpu().detach().numpy()
present[-1] = 1.
if not np.allclose(present, 1.):
return None
previous_object_classes = generated_data.idents[row_i, 0:iter_i+1].cpu().detach().numpy()
previous_object_classes[-1] = ci[row_i]
class_string = "_".join([self.object_classes[cj] for cj in previous_object_classes])
if class_string not in self.rbts_cache.keys():
# Cache miss, generate the RBT
env = {"n_objects": iter_i+1}
for iter_j in range(iter_i+1):
env["obj_%04d" % iter_j] = {
"class": self.object_classes[previous_object_classes[iter_j]],
"pose": np.zeros(3)
}
new_rbt, _ = psa_utils.build_rbt_from_summary(env)
self.rbts_cache[class_string] = new_rbt
return self.rbts_cache[class_string]
def _SampleObjectClass(self, generated_data, i, keep_going, obs=None):
''' Given the currently generated data, the current object number,
and the keep_going mask, decide what class to spawn next.
I'm focusing for now on learning properties of classes rather than
what class to spawn. In future, needs dependency on the environment
or at least parameterization of this underlying distribution. '''
return poutine.mask(lambda: pyro.sample("%d_class_assignment" % i,
dist.Categorical(torch.Tensor([1.0]).expand(
self.n_object_classes)),
obs=obs), keep_going)()
def _SampleObjectPlacement(self, ci, generated_data, i,
keep_going, object_world_priors,
obs=None):
''' Given the current object class, the current generated data,
the current object number, a keep_going mask, and the
keep_going mask, sample a placement for the object. '''
assert(ci.dim() == 1)
pre_poses_by_class = []
# Sample a pre-pose for each class so we can create the
# mixture over classes. We're using discrete class
# choices per sample, but this at least batches the sampling
# part.
# Not using a plate here so I can use AutoGuideList;
# AutoGuideList does not support sequential pyro.plate.
for k in pyro.plate("class_prior_mixture_%d" % (i),
self.n_object_classes):
pre_poses_part = poutine.mask(
lambda: pyro.sample('pre_poses_%d_%d' % (i, k),
object_world_priors[k].dist),
keep_going)()
pre_poses_by_class.append(pre_poses_part)
# Turn ci indices into a one-hot so we can select out the poses.
one_hot = torch.zeros(ci.shape + (self.n_object_classes,))
one_hot.scatter_(1, ci.unsqueeze(1), 1)
one_hot = one_hot.view(-1, 1, self.n_object_classes)
pre_poses = one_hot.matmul(
torch.stack(pre_poses_by_class, dim=1)).view(ci.shape[0], 3)
# Replace projection with a fixed-variance operation that
# doesn't move the pose far, with the same site name.
if not self.use_projection:
new_pose = poutine.mask(
lambda: pyro.sample(
"post_poses_%d" % i,
dist.Normal(
pre_poses,
scale=0.01*torch.ones(pre_poses.shape)
).to_event(1),
obs=obs),
keep_going)()
return new_pose
else:
# Load in the previous generated poses as poses to be fixed
# in the projection.
if i > 0:
q0_fixed = torch.cat([
expand_partial_pose_to_full_pose(
generated_data.poses[:, (k*3):(k*3+3)])
for k in range(i)], dim=-1)
else:
q0_fixed = None
# Build an RBT for each row in the batch...
rbts = [self._buildRbtFromGeneratedRowAndNewObject(
generated_data, k, i, ci)
for k in range(generated_data.poses.shape[0])]
# Constrain free poses to have y,z,roll,pitch constant
ik_constraints = [
diff_nlp.object_at_specified_pose_constraint_constructor_factory(
i, np.array([0., 0., 0.5, 0., 0., 0.]),
np.array([1., 1., 0.5, 0., 0., 2*np.pi]))]
projection_dist = diff_nlp.ProjectToFeasibilityWithIKAsDistribution(
rbts, expand_partial_pose_to_full_pose(pre_poses),
ik_constraints, 0.05, 0.01, noisy_projection=False,
q0_fixed=q0_fixed, event_select_inds=torch.tensor([0, 1, 5]))
projected_pose = poutine.mask(
lambda: pyro.sample(
"post_poses_%d" % (i), projection_dist, obs=obs),
keep_going)()
return projected_pose[:, :]
def model(self, data=None, subsample_size=None):
# Instantiate priors on object means.
prior_means_by_class = {}
prior_vars_by_class = {}
for class_name in self.object_classes:
prior_means_by_class[class_name] = {
"mean": torch.Tensor([0.5, 0.5, np.pi/2.]),
"var": torch.Tensor([0.5, 0.5, np.pi/2.])
}
# Remember that these are LogNormal means
prior_vars_by_class[class_name] = {
"mean": torch.Tensor([0., 0., 0.]),
"var": torch.Tensor([2., 2., 2.])
}
object_world_priors = []
for class_name in self.object_classes:
object_world_priors.append(
ObjectWorldPriorDescription(
class_name, prior_means_by_class,
prior_vars_by_class))
# Sample rates for the total number of objects we'll spawn.
# (Includes an entry for 0 objects.)
num_object_choices = self.max_num_objects - self.min_num_objects + 1
sample_rates = pyro.sample(
'num_objects_weights',
dist.Dirichlet(torch.ones(
num_object_choices)))
sample_distribution = dist.Categorical(sample_rates)
# Generate in vectorized form for easier batch conversion at the end
data_batch_size = 1
if data is not None:
if not isinstance(data, VectorizedEnvironments):
raise ValueError("Expected VectorizedEnvironments input")
if (data.idents.shape[1] != self.max_num_objects or
data.poses.shape[1] != self.max_num_objects*3):
raise ValueError("Got unexpected data shape.")
data_batch_size = data.idents.shape[0]
with pyro.plate('data', size=data_batch_size) as subsample_inds:
subsample_size = subsample_inds.shape[0]
generated_data = VectorizedEnvironments(
idents=torch.LongTensor(subsample_size, self.max_num_objects),
poses=torch.Tensor(subsample_size, self.max_num_objects*3),
present=torch.Tensor(subsample_size, self.max_num_objects),
n_samples=torch.Tensor(subsample_size))
generated_data.idents[:, :] = -1
generated_data.poses[:, :] = 0
generated_data.present[:, :] = 0
generated_data.n_samples[:] = 0
# Sample actual number of samples immediately
# (since we can directly observe this from data)
gt_n_samples = None
if data is not None:
gt_n_samples = data.n_samples[subsample_inds] - \
self.min_num_objects
num_samples = pyro.sample(
"num_samples", sample_distribution,
obs=gt_n_samples) + self.min_num_objects
generated_data.n_samples[:] = num_samples
# Go and spawn each object in order!
for i in range(self.max_num_objects):
gt_class = None
gt_pose = None
gt_keep_going = None
if data is not None:
gt_class = data.idents[subsample_inds, i]
gt_pose = data.poses[subsample_inds, (i*3):(i*3+3)]
gt_keep_going = data.present[subsample_inds, i]
keep_going = (i < num_samples)
ci = self._SampleObjectClass(generated_data, i,
keep_going, gt_class)
pose = self._SampleObjectPlacement(
ci, generated_data, i, keep_going,
object_world_priors, gt_pose)
# Fill in generated data appropriately
generated_data.idents[:, i] = (
ci.view(-1).type(torch.long)*keep_going.type(torch.long))
for k in range(3):
generated_data.poses[:, 3*i+k] = (
torch.Tensor(pose[:, k])
* keep_going.type(torch.float))
generated_data.present[:, i] = keep_going
return generated_data
def generation_guide(self, data=None, subsample_size=None):
for class_name in self.object_classes:
pyro.module(class_name + "_inference_module",
self.inference_modules[class_name])
# Instantiate priors on object means.
prior_means_by_class = {}
prior_vars_by_class = {}
for class_name in self.object_classes:
prior_means_by_class[class_name] = {
"mean": pyro.param("auto_%s_mean_mean" % class_name,
torch.rand(3)),
"var": pyro.param("auto_%s_mean_var" % class_name,
torch.rand(3),
constraint=constraints.positive)
}
# Remember that these are LogNormal means
prior_vars_by_class[class_name] = {
"mean": pyro.param("auto_%s_var_mean" % class_name,
torch.Tensor([0., 0., 0.])),
"var": pyro.param("auto_%s_var_var" % class_name,
torch.Tensor([2., 2., 2.]),
constraint=constraints.positive),
}
object_world_priors = []
for class_name in self.object_classes:
object_world_priors.append(
ObjectWorldPriorDescription(
class_name, prior_means_by_class,
prior_vars_by_class))
# Sample rates for the total number of objects we'll spawn.
# (Includes an entry for 0 objects.)
num_object_choices = self.max_num_objects - self.min_num_objects + | |
x = b) for coeffs on each input
coeffs = np.zeros((self.system.ninputs, self.basis.N))
for i in range(self.system.ninputs):
# Set up the matrices to get inputs
M = np.zeros((self.timepts.size, self.basis.N))
b = np.zeros(self.timepts.size)
# Evaluate at each time point and for each basis function
# TODO: vectorize
for j, t in enumerate(self.timepts):
for k in range(self.basis.N):
M[j, k] = self.basis(k, t)
b[j] = inputs[i, j]
# Solve a least squares problem for the coefficients
alpha, residuals, rank, s = np.linalg.lstsq(M, b, rcond=None)
coeffs[i, :] = alpha
return coeffs
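# In other words, for each input channel i the loop above solves a linear
# least squares problem (a sketch of the relation, using the names defined in
# this function):
#
#   M[j, k] = basis_k(t_j),   b[j] = inputs[i, j]
#   coeffs[i, :] = argmin_alpha || M @ alpha - b ||_2
#
# so that inputs[i, j] is approximated by sum_k coeffs[i, k] * basis_k(t_j).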
# Utility function to convert coefficient vector to input vector
def _coeffs_to_inputs(self, coeffs):
# TODO: vectorize
inputs = np.zeros((self.system.ninputs, self.timepts.size))
for i, t in enumerate(self.timepts):
for k in range(self.basis.N):
phi_k = self.basis(k, t)
for inp in range(self.system.ninputs):
inputs[inp, i] += coeffs[inp, k] * phi_k
return inputs
#
# Log and statistics
#
# To allow some insight into where time is being spent, we keep track of
# the number of times that various functions are called and (optionally)
# how long we spent inside each function.
#
def _reset_statistics(self, log=False):
"""Reset counters for keeping track of statistics"""
self.log = log
self.cost_evaluations, self.cost_process_time = 0, 0
self.constraint_evaluations, self.constraint_process_time = 0, 0
self.eqconst_evaluations, self.eqconst_process_time = 0, 0
self.system_simulations = 0
def _print_statistics(self, reset=True):
"""Print out summary statistics from last run"""
print("Summary statistics:")
print("* Cost function calls:", self.cost_evaluations)
if self.log:
print("* Cost function process time:", self.cost_process_time)
if self.constraint_evaluations:
print("* Constraint calls:", self.constraint_evaluations)
if self.log:
print(
"* Constraint process time:", self.constraint_process_time)
if self.eqconst_evaluations:
print("* Eqconst calls:", self.eqconst_evaluations)
if self.log:
print(
"* Eqconst process time:", self.eqconst_process_time)
print("* System simulations:", self.system_simulations)
if reset:
self._reset_statistics(self.log)
# Create an input/output system implementing an MPC controller
def _create_mpc_iosystem(self, dt=True):
"""Create an I/O system implementing an MPC controller"""
def _update(t, x, u, params={}):
coeffs = x.reshape((self.system.ninputs, -1))
if self.basis:
# Keep the coefficients unchanged
# TODO: could compute input vector, shift, and re-project (?)
self.initial_guess = coeffs
else:
# Shift the input samples by one time step (no basis in use)
self.initial_guess = np.hstack(
[coeffs[:, 1:], coeffs[:, -1:]]).reshape(-1)
res = self.compute_trajectory(u, print_summary=False)
return res.inputs.reshape(-1)
def _output(t, x, u, params={}):
if self.basis:
# TODO: compute inputs from basis elements
raise NotImplementedError("basis elements not implemented")
else:
inputs = x.reshape((self.system.ninputs, -1))
return inputs[:, 0]
return ct.NonlinearIOSystem(
_update, _output, dt=dt,
inputs=self.system.nstates, outputs=self.system.ninputs,
states=self.system.ninputs *
(self.timepts.size if self.basis is None else self.basis.N))
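# Note on the warm start in _update above: without a basis, the controller
# state holds the raw input samples, which are shifted left by one step and
# the last sample repeated. A tiny numeric sketch for one input channel:
#
#   coeffs = np.array([[1., 2., 3., 4.]])
#   np.hstack([coeffs[:, 1:], coeffs[:, -1:]])     # -> [[2., 3., 4., 4.]]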
# Compute the optimal trajectory from the current state
def compute_trajectory(
self, x, squeeze=None, transpose=None, return_states=None,
initial_guess=None, print_summary=True, **kwargs):
"""Compute the optimal input at state x
Parameters
----------
x : array-like or number, optional
Initial state for the system.
return_states : bool, optional
If True, return the values of the state at each time (default =
False).
squeeze : bool, optional
If True and if the system has a single output, return the system
output as a 1D array rather than a 2D array. If False, return the
system output as a 2D array even if the system is SISO. Default
value set by config.defaults['control.squeeze_time_response'].
transpose : bool, optional
If True, assume that 2D input arrays are transposed from the
standard format. Used to convert MATLAB-style inputs to our
format.
Returns
-------
res : OptimalControlResult
Bundle object with the results of the optimal control problem.
res.success: bool
Boolean flag indicating whether the optimization was successful.
res.time : array
Time values of the input.
res.inputs : array
Optimal inputs for the system. If the system is SISO and squeeze
is not True, the array is 1D (indexed by time). If the system is
not SISO or squeeze is False, the array is 2D (indexed by the
output number and time).
res.states : array
Time evolution of the state vector (if return_states=True).
"""
# Allow 'return_x' as a synonym for 'return_states'
return_states = ct.config._get_param(
'optimal', 'return_x', kwargs, return_states, pop=True, last=True)
# Store the initial state (for use in _constraint_function)
self.x = x
# Allow the initial guess to be overridden
if initial_guess is None:
initial_guess = self.initial_guess
else:
initial_guess = self._process_initial_guess(initial_guess)
# Call the SciPy optimizer
res = sp.optimize.minimize(
self._cost_function, initial_guess,
constraints=self.constraints, **self.minimize_kwargs)
# Process and return the results
return OptimalControlResult(
self, res, transpose=transpose, return_states=return_states,
squeeze=squeeze, print_summary=print_summary)
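# Hypothetical usage sketch (the problem object `ocp` and the state `x0` are
# assumptions): the returned bundle exposes the attributes documented above,
#
#   res = ocp.compute_trajectory(x0, return_states=True)
#   if res.success:
#       t, u_opt, x_opt = res.time, res.inputs, res.states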
# Compute the current input to apply from the current state (MPC style)
def compute_mpc(self, x, squeeze=None):
"""Compute the optimal input at state x
This function calls the :meth:`compute_trajectory` method and returns
the input at the first time point.
Parameters
----------
x: array-like or number, optional
Initial state for the system.
squeeze : bool, optional
If True and if the system has a single output, return the system
output as a 1D array rather than a 2D array. If False, return the
system output as a 2D array even if the system is SISO. Default
value set by config.defaults['control.squeeze_time_response'].
Returns
-------
input : array
Optimal input for the system at the current time. If the system
is SISO and squeeze is not True, the array is 1D (indexed by
time). If the system is not SISO or squeeze is False, the array
is 2D (indexed by the output number and time). Set to `None`
if the optimization failed.
"""
res = self.compute_trajectory(x, squeeze=squeeze)
return res.inputs[:, 0] if res.success else None
# Optimal control result
class OptimalControlResult(sp.optimize.OptimizeResult):
"""Result from solving an optimal control problem.
This class is a subclass of :class:`scipy.optimize.OptimizeResult` with
additional attributes associated with solving optimal control problems.
Attributes
----------
inputs : ndarray
The optimal inputs associated with the optimal control problem.
states : ndarray
If `return_states` was set to true, stores the state trajectory
associated with the optimal input.
success : bool
Whether or not the optimizer exited successfully.
problem : OptimalControlProblem
Optimal control problem that generated this solution.
"""
def __init__(
self, ocp, res, return_states=False, print_summary=False,
transpose=None, squeeze=None):
"""Create a OptimalControlResult object"""
# Copy all of the fields we were sent by sp.optimize.minimize()
for key, val in res.items():
setattr(self, key, val)
# Remember the optimal control problem that we solved
self.problem = ocp
# Reshape and process the input vector
coeffs = res.x.reshape((ocp.system.ninputs, -1))
# Compute time points (if basis present)
if ocp.basis:
inputs = ocp._coeffs_to_inputs(coeffs)
else:
inputs = coeffs
# See if we got an answer
if not res.success:
warnings.warn(
"unable to solve optimal control problem\n"
"scipy.optimize.minimize returned " + res.message, UserWarning)
# Optionally print summary information
if print_summary:
ocp._print_statistics()
if return_states and inputs.shape[1] == ocp.timepts.shape[0]:
# Simulate the system if we need the state back
_, _, states = ct.input_output_response(
ocp.system, ocp.timepts, inputs, ocp.x, return_x=True,
solve_ivp_kwargs=ocp.solve_ivp_kwargs)
ocp.system_simulations += 1
else:
states = None
# Process data as a time response (with "outputs" = inputs)
response = TimeResponseData(
ocp.timepts, inputs, states, issiso=ocp.system.issiso(),
transpose=transpose, return_x=return_states, squeeze=squeeze)
self.time = response.time
self.inputs = response.outputs
self.states = response.states
# Compute the input for a nonlinear, (constrained) optimal control problem
def solve_ocp(
sys, horizon, X0, cost, constraints=[], terminal_cost=None,
terminal_constraints=[], initial_guess=None, basis=None, squeeze=None,
transpose=None, return_states=False, log=False, **kwargs):
"""Compute the solution to an optimal control problem
Parameters
----------
sys : InputOutputSystem
I/O system for which the optimal input will be computed.
horizon : 1D array_like
List of times at which the optimal input should be computed.
X0: array-like or number, optional
Initial condition (default = 0).
cost : callable
Function that returns the integral cost given the current state
and input. Called as `cost(x, u)`.
constraints : list of tuples, optional
List of constraints that should hold at each point in the time vector.
Each element of the list should consist of a tuple with first element
given by :meth:`scipy.optimize.LinearConstraint` or
:meth:`scipy.optimize.NonlinearConstraint` and the remaining
elements of the tuple are the arguments that would be passed to those
functions. The following tuples are supported:
* (LinearConstraint, A, lb, ub): The matrix A is multiplied by stacked
vector of the state and input at each point on the trajectory for
comparison against the upper and lower bounds.
* (NonlinearConstraint, fun, lb, ub): a user-specific | |
<gh_stars>0
"""
This module defines the database classes.
"""
import json
import zlib
from typing import Any
import gridfs
from bson import ObjectId
from maggma.stores.aws import S3Store
from monty.dev import deprecated
from monty.json import MontyEncoder
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
)
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.io.vasp import Chgcar
from pymongo import ASCENDING, DESCENDING
from atomate.utils.database import CalcDb
from atomate.utils.utils import get_logger
__author__ = "<NAME>"
__credits__ = "<NAME>"
__email__ = "<EMAIL>"
logger = get_logger(__name__)
# If we use Maggma stores we will have to initialize a maggma store for each object type
OBJ_NAMES = (
"dos",
"bandstructure",
"chgcar",
"locpot",
"aeccar0",
"aeccar1",
"aeccar2",
"elfcar",
)
class VaspCalcDb(CalcDb):
"""
Class to help manage database insertions of Vasp drones
"""
def __init__(
self,
host="localhost",
port=27017,
database="vasp",
collection="tasks",
user=None,
password=<PASSWORD>,
**kwargs,
):
super().__init__(host, port, database, collection, user, password, **kwargs)
def build_indexes(self, indexes=None, background=True):
"""
Build the indexes.
Args:
indexes (list): list of single field indexes to be built.
background (bool): Run in the background or not.
TODO: make sure that the index building is sensible and check for
existing indexes.
"""
_indices = (
indexes
if indexes
else [
"formula_pretty",
"formula_anonymous",
"output.energy",
"output.energy_per_atom",
"dir_name",
]
)
self.collection.create_index("task_id", unique=True, background=background)
# build single field indexes
for i in _indices:
self.collection.create_index(i, background=background)
# build compound indexes
for formula in ("formula_pretty", "formula_anonymous"):
self.collection.create_index(
[
(formula, ASCENDING),
("output.energy", DESCENDING),
("completed_at", DESCENDING),
],
background=background,
)
self.collection.create_index(
[
(formula, ASCENDING),
("output.energy_per_atom", DESCENDING),
("completed_at", DESCENDING),
],
background=background,
)
# TODO consider sensible index building for the maggma stores
def insert_task(self, task_doc, use_gridfs=False):
"""
Inserts a task document (e.g., as returned by Drone.assimilate()) into the database.
Handles putting DOS, band structure and charge density into GridFS as needed.
During testing, a percentage of runs on some clusters had corrupted AECCAR files
even when everything else about the calculation looked OK.
So we do a quick check here and only record the AECCARs if they are valid.
Args:
task_doc (dict): the task document
use_gridfs (bool): store the data matching OBJ_NAMES to gridfs.
if maggma_store_type is set (ex. "s3") this flag will be ignored
Returns:
(int) - task_id of inserted document
"""
big_data_to_store = {}
def extract_from_calcs_reversed(obj_key):
"""
Grab the data from calcs_reversed.0.obj_key and store on gridfs directly or some Maggma store
Args:
obj_key: Key of the data in calcs_reversed.0 to store
"""
calcs_r_data = task_doc["calcs_reversed"][0][obj_key]
# remove the big object from all calcs_reversed
# this can catch situations where the drone added the data to more than one calc.
for i_calcs in range(len(task_doc["calcs_reversed"])):
del task_doc["calcs_reversed"][i_calcs][obj_key]
return calcs_r_data
# drop the data from the task_document and keep them in a separate dictionary (big_data_to_store)
if (
self._maggma_store_type is not None or use_gridfs
) and "calcs_reversed" in task_doc:
for data_key in OBJ_NAMES:
if data_key in task_doc["calcs_reversed"][0]:
big_data_to_store[data_key] = extract_from_calcs_reversed(data_key)
# insert the task document
t_id = self.insert(task_doc)
if "calcs_reversed" in task_doc:
# upload the data to a particular location and store the reference to that location in the task database
for data_key, data_val in big_data_to_store.items():
fs_di_, compression_type_ = self.insert_object(
use_gridfs=use_gridfs,
d=data_val,
collection=f"{data_key}_fs",
task_id=t_id,
)
self.collection.update_one(
{"task_id": t_id},
{
"$set": {
f"calcs_reversed.0.{data_key}_compression": compression_type_
}
},
)
self.collection.update_one(
{"task_id": t_id},
{"$set": {f"calcs_reversed.0.{data_key}_fs_id": fs_di_}},
)
return t_id
def retrieve_task(self, task_id):
"""
Retrieves a task document and unpacks the band structure and DOS as dict
Args:
task_id: (int) task_id to retrieve
Returns:
(dict) complete task document with BS + DOS included
"""
task_doc = self.collection.find_one({"task_id": task_id})
calc = task_doc["calcs_reversed"][0]
if "bandstructure_fs_id" in calc:
bs = self.get_band_structure(task_id)
calc["bandstructure"] = bs.as_dict()
if "dos_fs_id" in calc:
dos = self.get_dos(task_id)
calc["dos"] = dos.as_dict()
if "chgcar_fs_id" in calc:
chgcar = self.get_chgcar(task_id)
calc["chgcar"] = chgcar
if "aeccar0_fs_id" in calc:
aeccar = self.get_aeccar(task_id)
calc["aeccar0"] = aeccar["aeccar0"]
calc["aeccar2"] = aeccar["aeccar2"]
return task_doc
def insert_object(self, use_gridfs, *args, **kwargs):
"""Insert the object into big object storage, try maggma_store if
it is availible, if not try storing directly to girdfs.
Args:
use_gridfs (bool): Whether to store on gridfs if maggma storage is not availible
Returns:
fs_id: The id of the stored object
compression_type: The compress method of the stored object
"""
if self._maggma_store_type is not None:
return self.insert_maggma_store(*args, **kwargs)
elif use_gridfs:
return self.insert_gridfs(*args, **kwargs)
def insert_gridfs(self, d, collection="fs", compress=True, oid=None, task_id=None):
"""
Insert the given document into GridFS.
Args:
d (dict): the document
collection (string): the GridFS collection name
compress (bool): Whether to compress the data or not
oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
task_id(int or str): the task_id to store into the gridfs metadata
Returns:
file id, the type of compression used.
"""
oid = oid or ObjectId()
compression_type = None
# always perform the string conversion when inserting directly to gridfs
d = json.dumps(d, cls=MontyEncoder)
if compress:
d = zlib.compress(d.encode(), compress)
compression_type = "zlib"
fs = gridfs.GridFS(self.db, collection)
m_data = {"compression": compression_type}
if task_id:
m_data["task_id"] = task_id
# Putting task id in the metadata subdocument as per mongo specs:
# https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#terms
fs_id = fs.put(d, _id=oid, metadata=m_data)
return fs_id, compression_type
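# Hypothetical usage sketch (the db handle, document and task id are
# assumptions): storing a DOS dict in the "dos_fs" GridFS collection and
# recording the returned reference on the task document, roughly as
# insert_task() does for each key in OBJ_NAMES:
#
#   fs_id, compression = calc_db.insert_gridfs(
#       dos_dict, collection="dos_fs", compress=True, task_id=t_id)
#   calc_db.collection.update_one(
#       {"task_id": t_id},
#       {"$set": {"calcs_reversed.0.dos_fs_id": fs_id,
#                 "calcs_reversed.0.dos_compression": compression}})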
def insert_maggma_store(
self, d: Any, collection: str, oid: ObjectId = None, task_id: Any = None
):
"""
Insert the given document into a Maggma store, first checking that the store is available.
Args:
d: the document to be stored
collection (string): the name prefix for the maggma store
compress (bool): Whether to compress the data or not
oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
task_id(int or str): the task_id to store into the gridfs metadata
Returns:
file id, the type of compression used.
"""
oid = oid or str(ObjectId())
compression_type = None
doc = {
"fs_id": oid,
"maggma_store_type": self.get_store(collection).__class__.__name__,
"compression": compression_type,
"data": d,
}
search_keys = [
"fs_id",
]
if task_id is not None:
search_keys.append("task_id")
doc["task_id"] = str(task_id)
elif isinstance(d, dict) and "task_id" in d:
search_keys.append("task_id")
doc["task_id"] = str(d["task_id"])
# make sure the store is available
with self.get_store(collection) as store:
ping_ = store.index._collection.database.command("ping")
if ping_.get("ok", 0) != 1.0:
raise ConnectionError(
f"Not connected to the index store of {self.__name__}.maggma_store[{collection}]"
)
if isinstance(store, S3Store):
# TODO find some way to ping the aws service
# ping_ = self._maggma_stores[collection].s3_bucket._name
pass
if store.compress:
compression_type = "zlib"
doc["compression"] = "zlib"
store.update([doc], search_keys)
return oid, compression_type
def get_data_from_maggma_or_gridfs(self, task_id, key):
"""
Look up a task, then fetch the object of type `key` associated with that task.
Returns:
The data stored on object storage, typically a dictionary
"""
m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
fs_id = m_task["calcs_reversed"][0][f"{key}_fs_id"]
obj_dict = None
if self._maggma_store_type is not None:
with self.get_store(f"{key}_fs") as store:
query_doc = store.query_one({"fs_id": fs_id})
obj_dict = query_doc["data"] if query_doc is not None else None
# if the object cannot be found then try using the grid_fs method
if obj_dict is not None:
return obj_dict
else:
fs = gridfs.GridFS(self.db, f"{key}_fs")
bs_json = zlib.decompress(fs.get(fs_id).read())
obj_dict = json.loads(bs_json.decode())
return obj_dict
def get_band_structure(self, task_id):
"""
Read the BS data into a PMG BandStructure or BandStructureSymmLine object
Args:
task_id(int or str): the task_id containing the data
Returns:
BandStructure or BandStructureSymmLine
"""
obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="bandstructure")
if obj_dict["@class"] == "BandStructure":
return BandStructure.from_dict(obj_dict)
elif obj_dict["@class"] == "BandStructureSymmLine":
return BandStructureSymmLine.from_dict(obj_dict)
else:
raise ValueError(
"Unknown class for band structure! {}".format(obj_dict["@class"])
)
def get_dos(self, task_id):
"""
Read the DOS data into a PMG DOS object
Args:
task_id(int or str): the task_id containing the data
Returns:
CompleteDos object
"""
obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="dos")
return CompleteDos.from_dict(obj_dict)
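# Hypothetical usage sketch (connection details and the task id are
# assumptions):
#
#   db = VaspCalcDb(host="localhost", port=27017, database="vasp",
#                   collection="tasks")
#   bs = db.get_band_structure(task_id=1234)    # BandStructure(SymmLine)
#   dos = db.get_dos(task_id=1234)              # CompleteDos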
@deprecated("No longer supported, use get_chgcar instead")
def get_chgcar_string(self, task_id):
pass
def get_chgcar(self, task_id):
"""
Read the CHGCAR data into a PMG Chgcar object
Args:
task_id(int or str): the task_id containing the data
Returns:
chgcar: Chgcar object
"""
obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="chgcar")
return Chgcar.from_dict(obj_dict)
def get_aeccar(self, task_id, check_valid=True):
"""
Read the AECCAR0 + AECCAR2 grid_fs data into a Chgcar object
Args:
task_id(int or str): the task_id containing the gridfs metadata
check_valid (bool): make sure that the aeccar is positive definite
Returns:
{"aeccar0" : Chgcar, "aeccar2" : Chgcar}: dict of Chgcar objects
"""
obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="aeccar0")
aeccar0 = Chgcar.from_dict(obj_dict)
obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="aeccar2")
aeccar2 = Chgcar.from_dict(obj_dict)
if check_valid and (aeccar0.data["total"] + aeccar2.data["total"]).min() < 0:
ValueError(f"The AECCAR seems to be corrupted for task_id | |
Ports
# pol_group: Name of the Policy Group to apply
# mod (Optional): Mod as an integer (almost always 1)
# port: Port as an integer
# sub_start: Starting sub port as an integer
# sub_end: Ending sub port as an integer
def int_sub_selector_individual(self, **kwargs):
required_args = {'name': '',
'status': '',
'port_name': '',
'port_type': '',
'pol_group': '',
'port': '',
'sub_start': '',
'sub_end': ''}
optional_args = {'mod': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['mod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['mod'] = int(templateVars['mod'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if not int(templateVars['sub_start']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sub_start'] = int(templateVars['sub_start'])
if not int(templateVars['sub_end']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sub_end'] = int(templateVars['sub_end'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "int_sub_selector_individual.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the Switch Profile
# status: created | created,modified | deleted
# int_profile: Name of the Interface Profile to hook to Switch Selector
def int_prof_to_sw_profile(self, **kwargs):
required_args = {'name': '',
'status': '',
'int_profile': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "int_prof_to_sw_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/nprof-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the Interface Selector
# fex_pol_grp: Name of the FEX Policy Group
# status: created | created,modified | deleted
def fex_profile(self, **kwargs):
required_args = {'name': '',
'fex_pol_grp': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "fex_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/fexprof-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the Interface Selector
# status: created | created,modified | deleted
# port_name: Name of the port selector in the Interface Profile
# port_type: accportgrp | accbundle
# Note: accportgrp = Access Port
# Note: accbundle = vPC or Port Channel
# pol_group: Name of the Policy Group to apply
# mod_start: Starting mod as an integer (almost always 1)
# mod_end: Ending mod as an integer (almost always 1)
# port_start: Starting port as an integer
# port_end: Ending port as an integer
def fex_int_profile(self, **kwargs):
required_args = {'name': '',
'status': '',
'port_name': '',
'port_type': '',
'pol_group': '',
'port_start': '',
'port_end': '',
'fex_id': ''}
optional_args = {'mod_start': '1',
'mod_end': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['mod_start']):
raise InvalidArg('ID must be an integer')
else:
templateVars['mod_start'] = int(templateVars['mod_start'])
if not int(templateVars['mod_end']):
raise InvalidArg('ID must be an integer')
else:
templateVars['mod_end'] = int(templateVars['mod_end'])
if not int(templateVars['port_start']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port_start'] = int(templateVars['port_start'])
if not int(templateVars['port_end']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port_end'] = int(templateVars['port_end'])
if not int(templateVars['fex_id']):
raise InvalidArg('ID must be an integer')
else:
templateVars['fex_id'] = int(templateVars['fex_id'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "fex_int_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/infra/fexprof-{}/hports-{}-typ-range'
.format(templateVars['name'], templateVars['port_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the Interface Selector
# status: created | created,modified | deleted
# port_name: Name of the port selector in the Interface Profile
# mod_start: Starting mod as an integer (almost always 1)
# mod_end: Ending mod as an integer (almost always 1)
# port_start: Starting port as an integer
# port_end: Ending port as an integer
# fex_id: Integer ID of the FEX
# fex_pol_grp: Name of FEX Policy Group
# fex_prof: Name of the FEX Profile
def fex_leaf_profile(self, **kwargs):
required_args = {'name': '',
'status': '',
'port_name': '',
'port_start': '',
'port_end': '',
'fex_id': '',
'fex_prof': '',
'fex_pol_grp': ''}
optional_args = {'mod_start': '1',
'mod_end': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['mod_start']):
raise InvalidArg('ID must be an integer')
else:
templateVars['mod_start'] = int(templateVars['mod_start'])
if not int(templateVars['mod_end']):
raise InvalidArg('ID must be an integer')
else:
templateVars['mod_end'] = int(templateVars['mod_end'])
if not int(templateVars['port_start']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port_start'] = int(templateVars['port_start'])
if not int(templateVars['port_end']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port_end'] = int(templateVars['port_end'])
if not int(templateVars['fex_id']):
raise InvalidArg('ID must be an integer')
else:
templateVars['fex_id'] = int(templateVars['fex_id'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "fex_leaf_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Class must be instantiated with APIC IP address and cookies
class FabTnPol(object):
def __init__(self, apic, cookies):
self.apic = apic
self.cookies = cookies
self.templateLoader = jinja2.FileSystemLoader(
searchpath=(json_path + 'FabTnPol/'))
self.templateEnv = jinja2.Environment(loader=self.templateLoader)
# Method must be called with the following kwargs.
# name: The name of the Tenant
# status: created | created,modified | deleted
def tenant(self, **kwargs):
required_args = {'name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "tenant.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/tn-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# enforce: enforced | unenforced
# status: created | created,modified | deleted
def vrf(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'enforce': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vrf.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# contract: Name of the Contract
# status: created | created,modified | deleted
def vz_any_provide(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vz_any_provide.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# contract: Name of the Contract
# status: created | created,modified | deleted
def vz_any_consume(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vz_any_consume.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# prefgrp: disabled | enabled
def prefgrp(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'prefgrp': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
template_file = "prefgrp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}/any'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the BD
# arp: yes | no
# mdest: bd-flood | drop | encap-flood
# mcast: flood | opt-flood
# unicast: yes | no
# unk_unicast: proxy | flood
# vrf: Name of associated VRF -- moving to OPTIONAL to not break older
# versions, but has no functionality at this point
# status: created | created,modified | deleted
# multicast (Optional): yes | no -- multicast routing tick box
def bd(self, **kwargs):
required_args = {'tn_name': '',
'name': | |
<gh_stars>0
## Output data:
# Area: A
# Second moments of area: Ix, Iy
# Product moment of area: Ixy
# Section moduli: Kx, Ky
# (Plus in case of circularly symmetric cross-section:
# Polar moment of area: Ip
# Polar modulus: Kp)
import numpy as np
import math
def transform(func):
def inner(*args, **kwargs):
if len(args) > 3:
ans = func(*args[:-3])
if not kwargs.get('rad', False):  # conversion if the default unit isn't radian
phi = math.radians(args[-1])
else:
phi = args[-1]
ans["Ixi"], ans["Ieta"], ans["Ixieta"], ans["Ip2"] = Iarbitraryaxis(**ans, x=args[-3], y=args[-2], phi=phi)
else:
ans = func(*args)
return ans
return inner
@transform
def Circle(d, t = 0):
if t == 0:
A = d ** 2 * np.pi / 4
Ix = d ** 4 * np.pi / 64
Iy = d ** 4 * np.pi / 64
Ixy = 0
Kx = d ** 3 * np.pi / 32
Ky = d ** 3 * np.pi / 32
Ip = d ** 4 * np.pi / 32
Kp = d ** 3 * np.pi / 16
I1 = Ix
I2 = I1
properties = {
"A": A,
"Ix": Ix,
"Iy": Iy,
"Ixy": Ixy,
"Kx": Kx,
"Ky": Ky,
"Ip": Ip,
"Kp": Kp,
"alpha": 0,
"I1": I1,
"I2": I2
}
else:
di = d - t
A = (d ** 2 - di ** 2) * np.pi / 4
Ix = (d ** 4 - di ** 4) * np.pi / 64
Iy = (d ** 4 - di ** 4) * np.pi / 64
Ixy = 0
Kx = (d ** 4 - di ** 4) * np.pi / (32 * d)
Ky = (d ** 4 - di ** 4) * np.pi / (32 * d)
Ip = (d ** 4 - di ** 4) * np.pi / 32
Kp = (d ** 4 - di ** 4) * np.pi / (16 * d)
I1 = Ix
I2 = I1
properties = {
"A": A,
"Ix": Ix,
"Iy": Iy,
"Ixy": Ixy,
"Kx": Kx,
"Ky": Ky,
"Ip": Ip,
"Kp": Kp,
"alpha": 0,
"I1": I1,
"I2": I2
}
return properties
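# Hypothetical usage sketch: section properties of a solid and a hollow
# circular cross-section (units follow whatever d and t are given in; note
# that the @transform decorator only forwards positional arguments, so t is
# passed positionally here):
#
#   solid = Circle(20.0)
#   hollow = Circle(20.0, 2.0)
#   print(solid["A"], solid["Ix"], hollow["Kp"])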
@transform
def Rectangle(w, h, t = 0):
if t == 0:
A = w * h
Ix = w * h ** 3 / 12
Iy = w ** 3 * h / 12
Ip = Ix + Iy
Ixy = 0
Kx = 2 * Ix / h
Ky = 2 * Iy / w
if Iy > Ix:
alpha = np.pi / 2
I1 = Iy
I2 = Ix
else:
alpha = 0
I1 = Ix
I2 = Iy
properties = {
"A": A,
"Ix": Ix,
"Iy": Iy,
"Ip": Ip,
"Ixy": Ixy,
"Kx": Kx,
"Ky": Ky,
"alpha": alpha,
"I1": I1,
"I2": I2
}
else:
properties = RectangularHS(w, h, w-2*t, h-2*t)
return properties
def RectangularHS(w2, h2, w1, h1):
A = (w2 * h2) - (w1 * h1)
Ix = (w2 * h2 ** 3 - w1 * h1 ** 3) / 12
Iy = (w2 ** 3 * h2 - w1 ** 3 * h1) / 12
Ip = Ix + Iy
Ixy = 0
Kx = 2 * Ix / h2
Ky = 2 * Iy / w2
if Iy > Ix:
alpha = np.pi / 2
I1 = Iy
I2 = Ix
else:
alpha = 0
I1 = Ix
I2 = Iy
properties = {
"A": A,
"Ix": Ix,
"Iy": Iy,
"Ip": Ip,
"Ixy": Ixy,
"Kx": Kx,
"Ky": Ky,
"alpha": alpha,
"I1": I1,
"I2": I2
}
return properties
@transform
def Ellipse(a, b, t = 0):
if t == 0:
A = a * b * np.pi
Ix = a * b ** 3 * np.pi / 4
Iy = a ** 3 * b * np.pi / 4
Ip = Ix + Iy
Ixy = 0
Kx = Ix / b
Ky = Iy / a
if Iy > Ix:
alpha = np.pi / 2
I1 = Iy
I2 = Ix
else:
alpha = 0
I1 = Ix
I2 = Iy
else:
a2 = a
b2 = b
a1 = a - t
b1 = b - t
A = (a2 * b2 - a1 * b1) * np.pi
Ix = (a2 * b2 ** 3 - a1 * b1 ** 3) * np.pi / 4
Iy = (a2 ** 3 * b2 - a1 ** 3 * b1) * np.pi / 4
Ip = Ix + Iy
Ixy = 0
Kx = Ix / b2
Ky = Iy / a2
if Iy > Ix:
alpha = np.pi / 2
I1 = Iy
I2 = Ix
else:
alpha = 0
I1 = Ix
I2 = Iy
properties = {
"A": A,
"Ix": Ix,
"Iy": Iy,
"Ixy": Ixy,
"Ip": Ip,
"Kx": Kx,
"Ky": Ky,
"alpha": alpha,
"I1": I1,
"I2": I2
}
return properties
@transform
def IsoscelesTriangle(w, h, t = 0):
if t == 0:
A = w * h / 2
Ix = w * h ** 3 / 36
Iy = w ** 3 * h / 48
Ip = Ix + Iy
Ixy = 0
Kx = 3 * Ix / 2 / h
Ky = 2 * Iy / w
if Iy > Ix:
alpha = np.pi / 2
I1 = Iy
I2 = Ix
else:
alpha = 0
I1 = Ix
I2 = Iy
else:
w2 = w
h2 = h
phi = np.arctan(h / (w / 2))
u = t / np.sin(phi)
v = t / np.tan(phi)
w1 = w - 2 * (u + v)
h1 = h * (w1 / w)
A = (w2 * h2 - w1 * h1) / 2
A2 = w2*h2 / 2
A1 = w1*h1 / 2
Sy2 = h2 / 3
Sy1 = t + h1 / 3
Sy = (A2*Sy2 - A1*Sy1) / (A2 - A1)
dy2 = Sy2 - Sy
dy1 = Sy1 - Sy
Ixs2 = w2 * h2**3 / 36
Ixs1 = w1 * h1**3 / 36
Ix2 = Ixs2 + A2 * dy2**2
Ix1 = Ixs1 + A1 * dy1**2
Ix = Ix2 - Ix1
Iy = (w2 ** 3 * h2 - w1 ** 3 * h1) / 48
Ip = Ix + Iy
Ixy = 0
Kx = 3 * Ix / 2 / h2
Ky = 2 * Iy / w2
if Iy > Ix:
alpha = np.pi / 2
I1 = Iy
I2 = Ix
else:
alpha = 0
I1 = Ix
I2 = Iy
properties = {
"A": A,
"Ix": Ix,
"Iy": Iy,
"Ip": Ip,
"Ixy": Ixy,
"Kx": Kx,
"Ky": Ky,
"alpha": alpha,
"I1": I1,
"I2": I2
}
return properties
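# The hollow-triangle branch above combines the outer and inner triangles via
# the parallel axis theorem: for each triangle,
#   Ix_about_composite_axis = Ix_centroid + A * d**2
# where d is the offset between that triangle's centroid and the composite
# centroid Sy, and the inner triangle's contribution is then subtracted.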
@transform
def RightTriangle(w, h, t = 0):
if t == 0:
A = w * h / 2
Ix = w * h ** 3 / 36
Iy = w ** 3 * h / 36
Ip = Ix + Iy
Ixy = w**2 * h**2 / 72
Kx = Ix / (2/3 * h)
Ky = Ix / (2/3 * w)
I1 = (Ix+Iy)/2 + 0.5*np.sqrt((Ix-Iy)**2 + 4* Ixy**2)
I2 = (Ix+Iy)/2 - 0.5*np.sqrt((Ix-Iy)**2 + 4* Ixy**2)
if Ix != Iy and Ixy !=0:
alpha = np.arctan((Ix-I1)/Ixy)
else:
alpha = np.pi/4
else:
w2 = w
h2 = h
phi = np.arctan(h2/w2)
u = t / np.sin(phi)
v = t / np.tan(phi)
p = t / np.cos(phi)
q = t * np.tan(phi)
w1 = w2 - t - u - v
h1 = h2 - t - p - q
A = (w2 * h2 - w1 * h1) / 2
A2 = w2 * h2 / 2
A1 = w1 * h1 / 2
Sx2 = w2 / 3
Sy2 = h2 / 3
Sx1 = t + w1 / 3
Sy1 = t + h1 / 3
Sx = (A2*Sx2 - A1*Sx1) / (A2 - A1)
Sy = (A2*Sy2 - A1*Sy1) / (A2 | |
import re
import yaml
import logging
logger = logging.getLogger(__name__)
from pylatexenc.macrospec import MacroSpec, ParsedMacroArgs, MacroStandardArgsParser
from pylatexenc import latexwalker
from latexpp.macro_subst_helper import MacroSubstHelper
from latexpp.fix import BaseFix
# parse entropy macros etc.
_qitobjdefs = yaml.safe_load(r"""
stdset:
HH:
type: Hbase
Hzero:
type: Hbase
sub: '\mathrm{max},0'
Hmin:
type: Hbase
sub: '\mathrm{min}'
Hmaxf:
type: Hbase
sub: '\mathrm{max}'
Hfn:
type: Hfnbase
Dmax:
type: Dbase
sub: '\mathrm{max}'
Dminz:
type: Dbase
sub: '0'
Dminf:
type: Dbase
sub: '\mathrm{min}'
Dr:
type: Dbase
sub: '\mathrm{Rob}'
DHyp:
type: Dbase
sub: '\mathrm{H}'
Dhyp:
type: Dbase
sub: '\mathrm{h}'
DCoh:
type: DCohbase
DCohx:
type: DCohbase
DD:
type: DD
""")
baseqitobjs = yaml.safe_load("""
IdentProc:
type: IdentProc
ee:
type: ee
""")
_fixed_repl = {
'DSym': lambda self: self.DSym,
'HSym': lambda self: self.HSym,
}
class ExpandQitObjects(BaseFix):
r"""
Expand the definitions for the "QIT Objects" that are defined via the
{phfqit} package.
If applied along with :py:class:`latexpp.fixes.pkg.phfqit.ExpandMacros`, the
dependency on package {phfqit} should be removed.
Arguments:
- `qitobjs`: a dictionary of custom "QIT Objects" to expand. The dictionary
has the structure ``{macroname: qitobjspec, ...}``, where:
- `macroname` is the name of the macro representing this QIT object (no
leading backslash);
- `qitobjspec` is a dictionary with the following structure::
{
'type': <type>,
'sym': <sym>
<...>
}
The `<type>` is a string that must be one of the following QIT object
types: 'Hbase', 'Hfnbase', 'DD', 'Dbase', 'DCohbase', 'IdentProc', 'ee'.
This determines on one hand how the arguments to the macro are parsed
and on the other hand the template latex code that will serve as a
replacement for the QIT object invocation.
The `<sym>` is any string that will be used to override the default
symbol for this qit object type. The 'sym' key can be left out to use
the default symbol for the qit object.
Depending on `<type>`, you can specify further keys that control how the
qit object is rendered (they go alongside `type: <type>` above, in the
place where `<...>` stands):
- `<type>='Hbase'`: You may further specify ``'sub': <sub>`` which
specifies the subscript to add to the entropy object. This can be any
LaTeX code.
- `<type>='Hfnbase'`: You may further specify ``'sub': <sub>`` and
``'sup': <sup>`` which specifies the subscript and superscript to add
to the entropy object. Both can be any LaTeX code.
- `<type>='Dbase'`: You may further specify ``'sub': <sub>`` which
specifies the subscript to add to the relative entropy object. This
can be any LaTeX code. You can also specify 'default_epsilon' to give
a default value of the epsilon argument (any LaTeX code).
- `<type>='Dalpha'`: You can also specify 'default_alpha' and
'default_epsilon' to give a default value for these arguments (any
LaTeX code).
- `<type>='DD'`: There are no further keys you can specify.
- `<type>='DCohbase'`: There are no further keys you can specify.
- `<type>='IdentProc'`: There are no further keys you can specify.
- `<type>='ee'`: There are no further keys you can specify.
- `qitobjdef`: a list of built-in QIT object sets to use, designated by
builtin set name. Currently only the set named "stdset" is available,
i.e., you may use ``qitobjdef=[]`` (don't use built-in QIT objects) or
``qitobjdef=['stdset']`` (use built-in QIT objects).
- `HSym`: the default symbol to use for entropy-like QIT objects. Defaults
to 'H'
- `DSym`: the default symbol to use for relative-entropy-like QIT objects.
Defaults to 'D'
- `DCSym`: the default symbol to use for coherent-relative-entropy-like QIT
objects. Defaults to '\\hat{D}'
"""
def __init__(self, qitobjs=dict(), qitobjdef=['stdset'],
HSym='H', DSym='D', DCSym=r'\hat{D}'):
super().__init__()
self.qitobjs = dict(baseqitobjs)
for qitobjname in qitobjdef:
self.qitobjs.update(_qitobjdefs[qitobjname])
self.qitobjs.update(qitobjs)
self.HSym = HSym
self.DSym = DSym
self.DCSym = DCSym
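# Hypothetical configuration sketch (the macro name and subscript are
# assumptions, following the qitobjspec structure documented in the class
# docstring):
#
#   fix = ExpandQitObjects(
#       qitobjs={'Halpha': {'type': 'Hbase', 'sub': r'\alpha'}},
#       qitobjdef=['stdset'])
#
# would expand a custom \Halpha macro alongside the built-in entropy objects.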
def specs(self, **kwargs):
return dict(
macros= (
MacroSpec(mname, args_parser=PhfQitObjectArgsParser(self.qitargspec(m['type'])))
for mname, m in self.qitobjs.items()
)
)
def qitargspec(self, t):
return {
"IdentProc": "`[[{",
"ee": "^",
"Hbase": "`[[{[",
"Hfnbase": "`(",
"DD": "_^`{{",
"Dbase": "[`{{",
"Dalpha": "[[`{{",
"DCohbase": "[`{{{{{",
}.get(t)
def fix_node(self, n, **kwargs):
if n.isNodeType(latexwalker.LatexMacroNode) and n.macroname in _fixed_repl:
return _fixed_repl[n.macroname](self)
if not n.isNodeType(latexwalker.LatexMacroNode) or n.macroname not in self.qitobjs:
return None
m = self.qitobjs[n.macroname]
fixs = self.fix_qitobj(m, n)
#logger.debug(" --> %r", fixs)
return fixs
def fix_qitobj(self, m, n):
#logger.debug("fix_qitobj: m=%r, n=%r", m, n)
if m['type'] == 'IdentProc':
nsizespec, nsysA, nsysB, narg = n.nodeargd.argnlist
sym = m.get('sym', r'\mathrm{id}')
subscript = ''
A, B = '', ''
if nsysA is not None:
A = self.preprocess_contents_latex(nsysA)
if nsysB is not None:
B = self.preprocess_contents_latex(nsysB)
if A:
if B:
subscript = A + r'\to ' + B
else:
subscript = A
text = '{' + sym + '}'
if subscript:
text += '_{' + subscript + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'ee':
narg, = n.nodeargd.argnlist
sym = m.get('sym', r'e')
return '{'+sym+'}^{' + self.preprocess_contents_latex(narg) + '}'
if m['type'] == 'Hbase':
nsizespec, nstate, nepsilon, ntargetsys, ncondsys = n.nodeargd.argnlist
sym = m.get('sym', self.HSym)
sub = m.get('sub', None)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od
text += self.preprocess_contents_latex(ntargetsys)
if ncondsys is not None:
text += r'\,' + md + r'\,' + self.preprocess_contents_latex(ncondsys)
text += cd
if nstate is not None:
text += r'_{' + self.preprocess_contents_latex(nstate) + '}'
return text
if m['type'] == 'Hfnbase':
nsizespec, narg = n.nodeargd.argnlist
sub = m.get('sub', None)
sup = m.get('sup', None)
sym = m.get('sym', self.HSym)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if sup:
text += '^{' + sup + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'Hfnbase':
nsub, nsup, nsizespec, narg = n.nodeargd.argnlist
sub = m.get('sub', None)
sup = m.get('sup', None)
sym = m.get('sym', self.HSym)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if sup:
text += '^{' + sup + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'Dbase':
nepsilon, nsizespec, nstate, nrel = n.nodeargd.argnlist
sub = m.get('sub', None)
sym = m.get('sym', self.DSym)
default_epsilon = m.get('default_epsilon', None)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
elif default_epsilon:
text += '^{' + default_epsilon + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'Dalpha':
nalpha, nepsilon, nsizespec, nstate, nrel = n.nodeargd.argnlist
sym = m.get('sym', self.DSym)
default_alpha = m.get('default_alpha', None)
default_epsilon = m.get('default_epsilon', None)
text = '{' + sym + '}'
if nalpha is not None:
text += '_{' + self.preprocess_contents_latex(nalpha) + '}'
elif default_alpha:
text += '_{' + default_alpha + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
elif default_epsilon:
text += '^{' + default_epsilon + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'DD':
nsub, nsup, nsizespec, nstate, nrel = n.nodeargd.argnlist
sym = m.get('sym', self.DSym)
text = '{' + sym + '}'
if nsub is not None:
text += '_{' + self.preprocess_contents_latex(nsub) + '}'
if nsup is not None:
text += '^{' + self.preprocess_contents_latex(nsup) + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'DCohbase':
nepsilon, | |
= [7.0*c, 13.0*c, c, -7.0*c, -13.0*c, -c, -7.0*c, -13.0*c, -c, 7.0*c, 13.0*c, c]
l2mat[:, 2] = [-13.0*c, -7.0*c, -c, 13.0*c, 7.0*c, c,-13.0*c, -7.0*c, -c, 13.0*c, 7.0*c, c]
l2mat[:, 3] = [7.0*c, c, 13.0*c, -7.0*c, -c, -13.0*c, -7.0*c, -c, -13.0*c, 7.0*c, c, 13.0*c]
l2mat[:, 4] = [-a, z, a, -a, z, a, -a, z, a, -a, z, a]
l2mat[:, 5] = [13.0*c, c, 7.0*c, 13.0*c, c, 7.0*c, -13.0*c, -c, -7.0*c, -13.0*c, -c, -7.0*c]
l2mat[:, 6] = [c, 7.0*c, 13.0*c, -c, -7.0*c, -13.0*c, c, 7.0*c, 13.0*c, -c, -7.0*c, -13.0*c]
l2mat[:, 7] = [-c, -13.0*c, -7.0*c, -c, -13.0*c, -7.0*c, -c, 13.0*c, 7.0*c, c, 13.0*c, 7.0*c]
l2mat[:, 8] = [z, a, -a, z, a, -a, z, a, -a, z, a, -a]
return l2mat
def sf_qpt_wts():
'''
Quadrature point weights for a 10 node tet
'''
wtqp = np.zeros(15)
wtqp[0:4] = 0.602678571428571597e-2
wtqp[4] = 0.302836780970891856e-1
wtqp[5:9] = 0.116452490860289742e-1
wtqp[9:15] = 0.109491415613864534e-1
return wtqp
def sftsfmat():
'''
Creates an NTN array that has the appropriate weights applied at
each quadrature point.
Also returns NT with the appropriate weights applied to it.
'''
qpt_wts = sf_qpt_wts()
N = sfmat()
NT = N.T
NTN = np.zeros((10,10,15))
for i in range(15):
NTN[:,:,i] = np.outer(N[i,:], N[i,:]) * qpt_wts[i]
NT[:,i] = NT[:,i] * qpt_wts[i]
return (NTN, NT)
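# Note: NTN[:, :, i] above is the weighted outer product w_i * N_i^T N_i of the
# shape-function row at quadrature point i, so summing it over the quadrature
# points (scaled by the Jacobian determinants, as done in superconvergence_mat
# below) approximates the element integral of N^T N.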
def gr_lstq_amat(conn, nsf, ncrds):
'''
Inputs:
conn - the local connectivity array a nelem x 10 size array
nsf - the shape function matrix
ncrds - number of coordinates/nodal points in the grain
Output:
amat - the matrix used in our least squares problem for the grain
It will be constant through out the solution.
'''
nelems = conn.shape[1]
nqpts = nsf.shape[0]
amat = np.zeros((nelems*nqpts, ncrds))
#Build up our A matrix to be used in a least squares solution
j = 0
k = 0
for i in range(nelems):
j = i * nqpts
k = (i + 1) * nqpts
ecrds = np.squeeze(conn[:, i])
amat[j:k, ecrds] = nsf
return amat
def gr_lstq_solver(amat, q_mat, ncrds):
'''
Inputs:
conn - the local connectivity array a nelem x 10 size array
q_mat - vector at each quad point
size = nqpts x nvec x nelems
ncrds - number of coordinates/nodal points in the grain
Output:
nod_mat - the nodal values of a grain for the q_mat
residual - the residual from the least squares
A least squares routine is used to solve for the solution.
It'll find the nodal values of the points at the quadrature mat for
a grain.
'''
nvec = q_mat.shape[1]
nqpts = q_mat.shape[0]
nelems = q_mat.shape[2]
nod_mat = np.zeros((nvec,ncrds), dtype='float64')
b = np.zeros((nqpts*nelems))
residual = np.zeros(nvec)
for i in range(nvec):
b[:] = np.ravel(q_mat[:, i, :], order = 'F')
nod_mat[i, :], residual[i], t1, t2 = np.linalg.lstsq(amat, b, rcond=None)
return (nod_mat, residual)
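# Hypothetical usage sketch (array names and orientations are assumptions;
# note that gr_lstq_amat indexes the connectivity as conn[:, elem], i.e.
# nodes down the first axis):
#
#   amat = gr_lstq_amat(conn, sfmat(), ncrds)        # (nelems*nqpts, ncrds)
#   nod_vals, resid = gr_lstq_solver(amat, qpt_vals, ncrds)
#
# where qpt_vals has shape (nqpts, nvec, nelems) as documented above.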
def gr_nnlstq(amat, q_mat, ncrds):
'''
Inputs:
conn - the local connectivity array a nelem x 10 size array
q_mat - vector at each quad point
size = nqpts x nvec x nelems
ncrds - number of coordinates/nodal points in the grain
Output:
nod_mat - the nodal values of a grain for the q_mat
residual - the residual from the least squares
A non-negative least squares optimization routine (scipy.optimize.nnls) is used
to solve for the solution. It'll find the nodal values of the absolute q_mat for
a grain.
'''
nvec = q_mat.shape[1]
nqpts = q_mat.shape[0]
nelems = q_mat.shape[2]
nod_mat = np.zeros((nvec,ncrds), dtype='float64')
b = np.zeros((nqpts*nelems))
residual = np.zeros(nvec)
for i in range(nvec):
b[:] = np.ravel(q_mat[:, i, :], order = 'F')
nod_mat[i, :], residual[i] = sciop.nnls(amat, b)
return (nod_mat, residual)
def superconvergence_mat(NTN, qpt_det, conn, ncrds):
'''
Input:
NTN - the shape function transpose shape function outer product
matrix with dimensions - nnpe x nnpe x nqpts
qpt_det - the determinate of the jacobian matrix for each
quadrature point of an element - dimensions nelem x nqpts
conn - the connectivity array
ncrds - the number of coordinates
Output:
amat - the superconvergence matrix
'''
nelems = conn.shape[0]
nqpts = NTN.shape[2]
nnpe = NTN.shape[0]
amat = np.zeros((ncrds, ncrds), dtype='float64', order='F')
for i in range(nelems):
for j in range(nqpts):
for k in range(nnpe):
ind = conn[i, k]
amat[ind, conn[i, :]] = amat[ind, conn[i, :]] + NTN[k,:,j] * qpt_det[i,j]
return amat
def superconvergence_vec(NT, qpt_det, conn, qpt_vec, ncrds):
'''
Input
NT - the transpose shape function
qpt_det - the determinate of the jacobian matrix for each
quadrature point of an element - dimensions nelem x nqpts
conn - the connectivity array
qpt_vec - vector at each quad point for nvecs
size = nqpts x nvec x nelems
ncrds - the number of coordinates
Output:
bvec - the integration of the NT*qpt_vec product over the domain;
size is ncrds x nvec
'''
nqpts = qpt_det.shape[1]
nelems = conn.shape[0]
nvec = qpt_vec.shape[1]
nnpe = conn.shape[1]
bvec = np.zeros((ncrds, nvec), dtype='float64', order='F')
tarr = np.zeros((nqpts), dtype='float64', order='F')
tind = np.zeros((nnpe), dtype='int32', order='F')
for i in range(nvec):
for j in range(nelems):
tind[:] = conn[j, :]
tarr[:] = qpt_vec[:,i,j]*qpt_det[j, :]
bvec[tind, i] = bvec[tind, i] + NT.dot(tarr)
return bvec
def superconvergence_gr_nnlstq(amat, bvec, ncrds):
'''
Inputs:
conn - the local connectivity array a nelem x 10 size array
q_mat - vector at each quad point
size = nqpts x nvec x nelems
ncrds - number of coordinates/nodal points in the grain
Output:
nod_mat - the nodal values of a grain for the q_mat
residual - the residual from the least squares
A non-negative least squares optimization routine (scipy.optimize.nnls) is used
to solve for the solution. It'll find the nodal values of the absolute q_mat for
a grain.
'''
nvec = bvec.shape[1]
nod_mat = np.zeros((ncrds, nvec), dtype='float64', order='F')
b = np.zeros((ncrds), dtype='float64', order='C')
residual = np.zeros((nvec), dtype='float64', order='F')
for i in range(nvec):
b[:] = bvec[:, i]
nod_mat[:, i], residual[i] = sciop.nnls(amat, b)
return (nod_mat.T, residual)
def superconvergence_solve(amat, bvec):
'''
Solves the superconvergence patch test problem to obtain values at the
nodal coordinates
Input:
amat - our superconvergence matrix
bvec - our superconvergence bvec with a size of ncrds x nvec
Output
xvec - our superconvergence nodal solutions with a size of ncrds x nvec
'''
xvec = np.linalg.solve(amat, bvec)
return xvec.T
def superconvergence_solve_cg(NTN, qpt_det, bvec, conn, ncrds):
'''
Solves the superconvergence patch test problem to obtain values at the
nodal coordinates using a preconditioned conjugate gradient solver.
Input:
NTN - the shape function transpose shape function outer product
matrix with dimensions - nnpe x nnpe x nqpts
qpt_det - the determinate of the jacobian matrix for each
quadrature point of an element - dimensions nelem x nqpts
bvec - the integration of the NT*qpt_vec product over the domain;
size is ncrds x nvec
conn - the connectivity array
ncrds - the number of coordinates
Output:
xvec - our superconvergence nodal solutions with a size of ncrds x nvec
'''
nelems = conn.shape[0]
nvec = bvec.shape[1]
nnpe = conn.shape[1]
nqpts = NTN.shape[2]
mvec = np.zeros((ncrds, 1), dtype='float64', order='F')
tind = np.zeros((nnpe), dtype='int32')
xvec = np.zeros((ncrds, nvec), dtype='float64', order='F')
# We need to first form our preconditioner. A simple Jacobi preconditioner is
# good enough for our use since our elements result in a pretty sparse and
# blocked matrix structure.
for i in range(nelems):
tind[:] = conn[i, :]
for j in range(nqpts):
#The diagonal of our element series
diag_NTN = np.diag(NTN[:, :, j])
mvec[tind] = mvec[tind] + diag_NTN[:] * qpt_det[i, j]
#The inverse of our preconditioner
inv_mvec = 1.0/mvec[:]
#Here we're going to start our pcg solver
for i in range(nvec):
xvec[:, i] = superconvergence_block_pcg(NTN, qpt_det, inv_mvec, bvec[:, i], conn, ncrds)
return xvec
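# Hypothetical usage sketch of the superconvergence patch recovery pipeline
# above (variable names are assumptions):
#
#   NTN, NT = sftsfmat()
#   amat = superconvergence_mat(NTN, qpt_det, conn, ncrds)
#   bvec = superconvergence_vec(NT, qpt_det, conn, qpt_vec, ncrds)
#   nod_vals = superconvergence_solve(amat, bvec)                 # direct solve
#   # or, avoiding the dense solve:
#   nod_vals_cg = superconvergence_solve_cg(NTN, qpt_det, bvec, conn, ncrds)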
def superconvergence_block_pcg(NTN, qpt_det, inv_mvec, bvec, conn, ncrds):
'''
Solves the superconvergence patch test problem to obtain values at the
nodal coordinates using a preconditioned conjugate gradient solver.
| |
initialize=0)
m.x302 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x303 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x304 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x305 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x306 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x307 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x308 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x309 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x310 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x311 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x312 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x313 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x314 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x315 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x316 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x317 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x318 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x319 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x320 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x321 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x322 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x323 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x324 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x325 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x326 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x327 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x328 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x329 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x330 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x331 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x332 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x333 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x334 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x335 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x336 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x337 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x338 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x339 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x340 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x341 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x342 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x343 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x344 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x345 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x346 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x347 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x348 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x349 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x350 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x351 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x352 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x353 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x354 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x355 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x356 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x357 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x358 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x359 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x360 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x361 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x362 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x363 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x364 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x365 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x366 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x367 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x368 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x369 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x370 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x371 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x372 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x373 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x374 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x375 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x376 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x377 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x378 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x379 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x380 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x381 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x382 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x383 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x384 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x385 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x386 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x387 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x388 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x389 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x390 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x391 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x392 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x393 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x394 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x395 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x396 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x397 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x398 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x399 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x400 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x401 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x402 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x403 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x404 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x405 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x406 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x407 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x408 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x409 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x410 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x411 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x412 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x413 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x414 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x415 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x416 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x417 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x418 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x419 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x420 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x421 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x422 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x423 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x424 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x425 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x426 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x427 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x428 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x429 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x430 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x431 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x432 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x433 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x434 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x435 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x436 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x437 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x438 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x439 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x440 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x441 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x442 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x443 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x444 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x445 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x446 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x447 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x448 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x449 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x450 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x451 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x452 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x453 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x454 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x455 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x456 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x457 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x458 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x459 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x460 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x461 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x462 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x463 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x464 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x465 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x466 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x467 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x468 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x469 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x470 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x471 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x472 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x473 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x474 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x475 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x476 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x477 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x478 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x479 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x480 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x481 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x482 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x483 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x484 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x485 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x486 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x487 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x488 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x489 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x490 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x491 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x492 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x493 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x494 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x495 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x496 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x497 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x498 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x499 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x500 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x501 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x502 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x503 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x504 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x505 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x506 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x507 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x508 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x509 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x510 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x511 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x512 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x513 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x514 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x515 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x516 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x517 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x518 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x519 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x520 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x521 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x522 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x523 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x524 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x525 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x526 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x527 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x528 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x529 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x530 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x531 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x532 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x533 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x534 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x535 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x536 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x537 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x538 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x539 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x540 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x541 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x542 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x543 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x544 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x545 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x546 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x547 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x548 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x549 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x550 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x551 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x552 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x553 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x554 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x555 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x556 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x557 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x558 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x559 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x560 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x561 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x562 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x563 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x564 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x565 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x566 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x567 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x568 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x569 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x570 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x571 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x572 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x573 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x574 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x575 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x576 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x577 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x578 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x579 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x580 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x581 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x582 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x583 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x584 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x585 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x586 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x587 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x588 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x589 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x590 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x591 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x592 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x593 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x594 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x595 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x596 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x597 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x598 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x599 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x600 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x601 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x602 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x603 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x604 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x605 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x606 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x607 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x608 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x609 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x610 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x611 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x612 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x613 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x614 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x615 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x616 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x617 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x618 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x619 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x620 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x621 = Var(within=Reals, bounds=(0,None), initialize=0)
# pdfa_parser/avisleser.py  (10-100 GitHub stars)
#!/usr/bin/env python
import argparse
import faulthandler
import io
import logging
import os
import re
import signal
import statistics
import string
import sys
import tarfile
import traceback
from collections import Counter
from collections.abc import Iterable
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from typing import Iterator, List, NoReturn, Optional, Tuple, Any, Union
# import warnings; warnings.filterwarnings("ignore")
from joblib import Parallel, delayed
from joblib import Memory
import fitz
from pdfminer.high_level import extract_pages, extract_text_to_fp
from pdfminer.layout import LTTextContainer, LTAnno, LTChar, LAParams, LTPage
from sentence_splitter import SentenceSplitter, split_text_into_sentences
from tqdm import tqdm
SPACES_RE = re.compile(r"[ ][ ]+")
STARTEND_RE = re.compile(r"(\n\s+)")
HYPHENS_RE = re.compile(r"([a-zæåø])-\s+([a-zæåø])")
HYPHENSHASH_RE = re.compile(r"([a-zæåø])-#\s+([a-zæåø])")
LINEBREAK_RE = re.compile(r"[\n]")
LINEBREAKS_RE = re.compile(r"[\n]{2,}")
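# Example of the de-hyphenation patterns above on line-wrapped Norwegian
# text: HYPHENS_RE.sub(r"\1\2", "infor-\n masjon") -> "informasjon", and
# HYPHENSHASH_RE does the same for hyphens tagged with "#" during extraction,
# e.g. "infor-# masjon" -> "informasjon".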
SPLITTER = SentenceSplitter(language='no')
LOGGER = None
NOW = datetime.now()
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds, description=None):
def signal_handler(signum, frame):
raise TimeoutException(description or "Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
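# Usage sketch for the SIGALRM-based time limit above (Unix only; the alarm
# is cleared in the finally block even if the body raises):
#
#   try:
#       with time_limit(60, "PDF parsing timed out"):
#           parse_one_pdf()            # hypothetical call
#   except TimeoutException as err:
#       get_logger().warning(str(err))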
def get_logger() -> logging.Logger:
"""
Get a logger
"""
global LOGGER
if LOGGER is None:
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d - %H:%M:%S"
)
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
console.setFormatter(formatter)
LOGGER.addHandler(console)
return LOGGER
def process_text_container(element: LTTextContainer) -> Tuple[List[Tuple[str, float]], float]:
fontnames = []
fontsize = 0
if isinstance(element, Iterable):
for text_line in element:
if isinstance(text_line, Iterable):
for character in text_line:
if isinstance(character, LTChar):
fontnames.append((character.fontname, character.size))
if fontsize < character.size:
fontsize = character.size
elif isinstance(text_line, LTChar):
fontnames.append((text_line.fontname, text_line.size))
if fontsize < text_line.size:
fontsize = text_line.size
elif isinstance(element, LTChar):
fontnames.append((element.fontname, element.size))
if fontsize < element.size:
fontsize = element.size
return fontnames, fontsize
def traverse(element: Union[Iterable, LTTextContainer]) -> Tuple[List[Tuple[str, float]], float]:
fontnames = []
fontsize = 0
if isinstance(element, LTTextContainer):
return process_text_container(element)
elif isinstance(element, Iterable):
for item in element:
element_output = traverse(item)
if element_output:
element_fontnames, element_fontsize = element_output
fontnames += element_fontnames
if fontsize < element_fontsize:
fontsize = element_fontsize
return fontnames, fontsize
def get_most_frequent_and_largest_fonts(
pages: List[LTPage]
) -> Tuple[Optional[str], Optional[float], Optional[float]]:
all_fonts, fontsize = traverse(pages)
fontnames = Counter(all_fonts)
return (
fontnames.most_common(1)[0][0][0] if fontnames else None,
fontnames.most_common(1)[0][0][1] if fontnames else None,
fontsize if fontsize > 0 else None
)
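# Note on the indexing above: all_fonts is a list of (fontname, size) tuples,
# so Counter.most_common(1) returns e.g. [(("Times-Roman", 11.0), 842)];
# [0][0][0] is therefore the dominant font name, [0][0][1] its size, and the
# third return value is the largest size encountered anywhere on the pages.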
def get_text_containers(element: Any) -> List[LTTextContainer]:
containers = []
if isinstance(element, LTTextContainer):
return [element]
elif isinstance(element, Iterable):
for item in element:
element_output = get_text_containers(item)
if element_output:
containers += element_output
return containers
def get_text_line(
text_line: List,
line_number: int,
font: str,
size: float,
previous_line_font: str,
) -> Tuple[str, str]:
chars = ""
line_font = ""
if isinstance(text_line, Iterable):
for character_number, character in enumerate(text_line):
char = " "
if isinstance(character, LTChar):
char_font = getattr(character, "fontname")
char_size = getattr(character, "size")
if char_font == font or char_size == size:
char = character.get_text()
# TODO: Identify subheadings
# if (getattr(character, "fontname") != font
# and getattr(character, "fontname") != previous_line_font
# and getattr(character, "size") == size
# and line_number == 0
# and character_number == 0):
# char = f"\n→ {char}"
# char += character.get_text()
if char.strip():
line_font = getattr(character, "fontname", "")
chars = f"{chars}{char}"
else:
try:
chars = text_line.get_text()
        except Exception:
pass
return chars, line_font
def get_text(
pages: List[LTPage],
font: str,
size: float,
page_break: Optional[str]=None,
) -> str:
text = ""
for page_layout in pages:
for box_id, element in enumerate(page_layout):
if isinstance(element, LTTextContainer):
last_font = ""
for line_number, text_line in enumerate(element):
chars, last_font = get_text_line(text_line, line_number, font, size, last_font)
text = f"{text} {chars} "
text = f"{text}\n"
if page_break and text.strip():
text = f"{text}{page_break}"
return text
def get_unstructured_text(
pages: List[LTPage],
font: str,
size: float,
page_break: Optional[str]=None,
) -> str:
text = ""
for element in get_text_containers(pages):
last_font = ""
for line_number, text_line in enumerate(element):
chars, last_font = get_text_line(text_line, line_number, font, size, last_font)
text = f"{text} {chars} "
text = f"{text}\n"
if page_break and text.strip():
text = f"{text}{page_break}"
return text
def get_all_texts(
filename: Union[str, Path],
line_margin: float=0.15,
detect_vertical: bool=-0.8,
boxes_flow: Optional[float]=None,
page_break: Optional[str]=None,
) -> Tuple[str, Optional[str]]:
laparams = LAParams(
line_margin=line_margin,
boxes_flow=boxes_flow,
detect_vertical=detect_vertical,
all_texts=True,
)
pages = list(extract_pages(filename, laparams=laparams))
font, size, _ = get_most_frequent_and_largest_fonts(pages)
text = get_unstructured_text(pages, font, size, page_break)
if text.strip():
return text, None
with open(filename, 'rb') as file, io.StringIO() as buffer:
extract_text_to_fp(file, buffer, laparams=laparams)
text = buffer.getvalue().strip()
html = None # disabling HTML for now
# with open(filename, 'rb') as file, io.StringIO() as buffer:
# extract_text_to_fp(
# file, buffer, laparams=laparams, output_type='html', codec=None
# )
# html = buffer.getvalue().strip()
# return LINEBREAK_RE.sub(r" ", text), html
return text, html
def reformat(
text: str,
single_hyphens: bool=True,
page_break: Optional[str]=None,
) -> str:
if not page_break:
return reformat_page(text, single_hyphens=single_hyphens)
else:
return f"\n{page_break}\n".join(
reformat_page(text_page.strip(), single_hyphens=single_hyphens)
for text_page in text.split(page_break)
)
def reformat_page(text: str, single_hyphens: bool=True) -> str:
text = SPACES_RE.sub(r" ", text)
    if single_hyphens:
        text = HYPHENS_RE.sub(r"\1\2", text)
    text = HYPHENSHASH_RE.sub(r"\1\2", text)
text = "\n".join(line.strip() for line in text.split("\n"))
text = LINEBREAKS_RE.sub("\n\n", text)
blocks = []
for block in text.split("\n\n"):
lines = []
for line in block.split("\n"):
if all(char in string.digits for char in line if char != ""):
lines.append("\n" + line.strip() + "\n")
else:
lines.append(line.strip())
blocks.append(" ".join(lines).strip())
text = "\n\n".join(blocks)
text = "\n".join(line.strip() for line in text.split("\n"))
return text
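# Sketch of what reformat_page does to extracted text: collapse runs of
# spaces, re-join hyphenated words, and merge the lines of each blank-line
# separated block into one paragraph, e.g.
#   "Dette er en\nlang setning.\n\nNeste avsnitt."
#   -> "Dette er en lang setning.\n\nNeste avsnitt."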
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def get_text_pdfminer(
filename: str,
line_margin: float=0.15,
detect_vertical: bool=-0.8,
all_texts: bool=False,
boxes_flow: Optional[float]=None,
same_sizes: Optional[bool]=False,
    occurrence_rate: Optional[float]=None,
page_break: Optional[str]=None,
contents: Optional[io.BytesIO]=None,
) -> Tuple[str, Optional[str]]:
text = ""
html = None
laparams = LAParams(
line_margin=line_margin,
boxes_flow=boxes_flow,
detect_vertical=detect_vertical,
all_texts=False
)
pages = list(extract_pages(filename, laparams=laparams))
font, size, _ = get_most_frequent_and_largest_fonts(pages)
text = get_text(pages, font, size, page_break)
if len(text.strip()) == 0 and all_texts:
text, html = get_all_texts(
filename,
line_margin=line_margin,
boxes_flow=boxes_flow,
detect_vertical=detect_vertical,
)
return reformat(text, page_break=page_break).strip(), html
def get_text_fitz(
filename: str,
line_margin: float=0.15,
detect_vertical: bool=-0.8,
all_texts: bool=False,
boxes_flow: Optional[float]=None,
same_sizes: Optional[bool]=False,
    occurrence_rate: Optional[float]=None,
page_break: Optional[str]=None,
contents: Optional[io.BytesIO]=None,
) -> Tuple[str, Optional[str]]:
faulthandler.enable()
# Disable ascender/descender values as per
# https://github.com/pymupdf/PyMuPDF/issues/930 and
# https://bugs.ghostscript.com/show_bug.cgi?id=703649
fitz.TOOLS.unset_quad_corrections(True)
if contents is None:
pdf = fitz.open(filename)
else:
pdf = fitz.Document(stream=contents, filetype=filename.name)
text = []
for page in pdf:
fonts = []
lengths = []
page_dict = page.get_text("dict", flags=0)
for block in page_dict.get("blocks", []):
for line in block.get("lines", []):
line_text = ""
chars = ""
for span in line.get("spans", []):
span_font = span.get("font", "").split(",")[0].split("-")[0]
span_text = span.get("text", "")
chars = ""
for char in span_text:
if char.strip() != "":
fonts.append((span_font, span["size"], span["color"]))
chars += char
line_text += span_text.strip()
if chars:
lengths.append(len(chars))
if not fonts or not lengths:
continue
if occurrence_rate is not None:
counts = Counter(fonts)
freqs = [(i, counts[i] / len(fonts))
for i, count in counts.most_common()]
font_tuples = set(
font_tuple for font_tuple, freq in freqs
if freq >= occurrence_rate
)
font, size, color = list(zip(*font_tuples))
else:
font, size, color = Counter(fonts).most_common(1)[0][0]
font, size, color = [font], [size], [color]
font, size, color = set(font), set(size), set(color)
# if len(lengths) > 1:
# lengths_std = statistics.stdev(lengths)
# lengths_mean = statistics.mean(lengths)
# else:
# lengths_std = 0 # Not sure about this
# lengths_mean = len(lengths)
for block in page_dict.get("blocks", []):
for line in block.get("lines", []):
line_text = ""
for span_index, span in enumerate(line.get("spans", [])):
span_text = span["text"].strip()
if (span_text
and any(span["font"].startswith(f) for f in font)
and any(span["color"] == c for c in color)
and (any(span["size"] == s for s in size)
or not same_sizes)
and span["flags"] in (0, 4, 6)
and line["wmode"] == 0
):
line_text += span["text"]
if len(line_text) > 2 and line_text.rstrip()[-1] == "-":
line_text += "#"
text.append(line_text)
text.append(" ")
text.append("\n")
text.append("\n")
if page_break and "".join(text).strip():
text.append(page_break)
text = reformat("".join(text), page_break=page_break, single_hyphens=False)
if "-#" in text:
text = text.replace("-#", "-")
return text, None
def get_text_from_pdf_or_tar(
filename: str,
pdfs_dir: Union[Path, str],
output: Union[Path, str],
overwrite: bool=False,
bar: Optional[tqdm]=None,
line_margin: float=0.15,
detect_vertical: bool=-0.8,
all_texts: bool=False,
boxes_flow: Optional[float]=None,
skip_empty: Optional[bool]=True,
same_sizes: Optional[bool]=False,
    occurrence_rate: Optional[float]=None,
page_break: Optional[str]=None,
) -> NoReturn:
if ".tar" in filename.suffixes or ".tgz" in filename.suffixes:
if ".gz" in filename.suffixes or ".tgz" in filename.suffixes:
mode = "r:gz'"
else:
mode = "r"
tar = tarfile.open(filename, mode=mode)
if bar is not None:
pdf_names = tqdm(tar.getnames(), desc=" - ")
else:
pdf_names = tar.getnames()
for pdf_name in pdf_names:
if pdf_name.endswith(".pdf"):
pdf_file = tar.extractfile(pdf_name)
pdf_bytes = io.BytesIO(pdf_file.read())
get_text_from_pdf(
filename=Path(pdf_name),
pdfs_dir=pdfs_dir,
output=output,
overwrite=overwrite,
bar=bar,
line_margin=line_margin,
detect_vertical=detect_vertical,
all_texts=all_texts,
boxes_flow=boxes_flow,
skip_empty=skip_empty,
same_sizes=same_sizes,
occurrence_rate=occurrence_rate,
page_break=page_break,
contents=pdf_bytes,
)
else:
get_text_from_pdf(
filename=filename,
pdfs_dir=pdfs_dir,
output=output,
overwrite=overwrite,
bar=bar,
line_margin=line_margin,
detect_vertical=detect_vertical,
all_texts=all_texts,
boxes_flow=boxes_flow,
skip_empty=skip_empty,
same_sizes=same_sizes,
occurrence_rate=occurrence_rate,
page_break=page_break,
)
def get_text_from_pdf(
filename: str,
pdfs_dir: Union[Path, str],
output: Union[Path, str],
overwrite: bool=False,
bar: Optional[tqdm]=None,
line_margin: float=0.15,
detect_vertical: bool=-0.8,
all_texts: bool=False,
boxes_flow: Optional[float]=None,
import abc
import functools
import numpy as np
import scipy.linalg
from probnum import diffeq, random_variables, statespace, utils
from probnum._randomvariablelist import _RandomVariableList
from bvps import (
bridges,
bvp_initialise,
control,
error_estimates,
kalman,
mesh,
ode_measmods,
problems,
quadrature,
stopcrit,
)
class BVPSolver:
def __init__(
self,
dynamics_model,
error_estimator,
initial_sigma_squared=1e10,
):
self.dynamics_model = dynamics_model
self.error_estimator = error_estimator
self.initial_sigma_squared = initial_sigma_squared
self.localconvrate = self.dynamics_model.ordint # + 0.5?
@classmethod
def from_default_values_std_refinement(
cls,
dynamics_model,
use_bridge=True,
initial_sigma_squared=1e10,
normalise_with_interval_size=False,
):
quadrature_rule = quadrature.expquad_interior_only()
P0 = dynamics_model.proj2coord(0)
P1 = dynamics_model.proj2coord(1)
error_estimator = ErrorViaStandardDeviation(
atol=None,
rtol=None,
quadrature_rule=quadrature_rule,
P0=P0,
P1=P1,
normalise_with_interval_size=normalise_with_interval_size,
)
return cls(
dynamics_model=dynamics_model,
error_estimator=error_estimator,
initial_sigma_squared=initial_sigma_squared,
)
@classmethod
def from_default_values(
cls,
dynamics_model,
initial_sigma_squared=1e10,
use_bridge=True,
normalise_with_interval_size=False,
):
quadrature_rule = quadrature.expquad_interior_only()
P0 = dynamics_model.proj2coord(0)
P1 = dynamics_model.proj2coord(1)
error_estimator = ErrorViaResidual(
atol=None,
rtol=None,
quadrature_rule=quadrature_rule,
P0=P0,
P1=P1,
normalise_with_interval_size=normalise_with_interval_size,
)
return cls(
dynamics_model=dynamics_model,
error_estimator=error_estimator,
initial_sigma_squared=initial_sigma_squared,
)
@classmethod
def from_default_values_probabilistic_refinement(
cls,
dynamics_model,
initial_sigma_squared=1e10,
normalise_with_interval_size=False,
):
quadrature_rule = quadrature.expquad_interior_only()
P0 = dynamics_model.proj2coord(0)
P1 = dynamics_model.proj2coord(1)
error_estimator = ErrorViaProbabilisticResidual(
atol=None,
rtol=None,
quadrature_rule=quadrature_rule,
P0=P0,
P1=P1,
normalise_with_interval_size=normalise_with_interval_size,
)
return cls(
dynamics_model=dynamics_model,
error_estimator=error_estimator,
initial_sigma_squared=initial_sigma_squared,
)
def compute_initialisation(
self, bvp, initial_grid, initial_guess=None, use_bridge=True
):
# Check that initial grid covers the domain
np.testing.assert_allclose(bvp.t0, initial_grid[0])
np.testing.assert_allclose(bvp.tmax, initial_grid[-1])
# Check that initial guess satisfies the BC
# Create bridge
initrv_not_bridged = self.create_initrv()
if use_bridge:
dynamics_model, initrv = self.initialise_bridge(bvp, initrv_not_bridged)
else:
dynamics_model, initrv = self.dynamics_model, initrv_not_bridged
filter_object = kalman.MyKalman(
dynamics_model, measurement_model=None, initrv=initrv
)
# Create Measmodlist and zero data
N = len(initial_grid)
d = bvp.dimension
        if initial_guess is not None and use_bridge:
initguess_measmodfun = self.initial_guess_measurement_model_function(
damping=1e-6
)
else:
initguess_measmodfun = self.initial_guess_measurement_model_function(
damping=0.0
)
if initial_guess is None:
initial_guess_full = [None] * N
else:
initial_guess_full = initial_guess
if isinstance(bvp, problems.SecondOrderBoundaryValueProblem):
ode_measmod = ode_measmods.from_second_order_ode(bvp, self.dynamics_model)
else:
ode_measmod = ode_measmods.from_ode(bvp, self.dynamics_model)
measmod_list = [
ode_measmod if el is None else initguess_measmodfun(el)
for el in initial_guess_full
]
left_measmod, right_measmod = ode_measmods.from_boundary_conditions(
bvp, self.dynamics_model
)
        if initial_guess is None and not use_bridge:
measmod_list[0] = [left_measmod, measmod_list[0]]
measmod_list[-1] = [measmod_list[-1], right_measmod]
dataset = np.zeros((N, d))
# Filter
kalman_posterior = filter_object.filtsmooth(
dataset=dataset,
times=initial_grid,
measmod_list=measmod_list,
)
sigmas = filter_object.sigmas
normalisation = filter_object.normalisation_for_sigmas
sigma_squared = np.sum(sigmas) / normalisation
return kalman_posterior, sigma_squared
def initialise_bridge(self, bvp, initrv_not_bridged):
bridge_prior = bridges.GaussMarkovBridge(self.dynamics_model, bvp)
initrv_bridged = bridge_prior.initialise_boundary_conditions(initrv_not_bridged)
return bridge_prior, initrv_bridged
def initial_guess_measurement_model_function(self, damping=0.0):
projmat = self.dynamics_model.proj2coord(0)
d = projmat.shape[0]
variances = damping * np.ones(d)
process_noise_cov = np.diag(variances)
process_noise_cov_cholesky = np.diag(np.sqrt(variances))
measmodfun = lambda s: statespace.DiscreteLTIGaussian(
state_trans_mat=projmat,
shift_vec=-s,
proc_noise_cov_mat=process_noise_cov,
proc_noise_cov_cholesky=process_noise_cov_cholesky,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
return measmodfun
def solve(self, *args, **kwargs):
for kalman_posterior, _ in self.solution_generator(*args, **kwargs):
pass
return kalman_posterior
def solution_generator(
self,
bvp,
atol,
rtol,
initial_posterior,
maxit_ieks=10,
maxit_em=1,
yield_ieks_iterations=False,
):
self.error_estimator.set_tolerance(atol=atol, rtol=rtol)
kalman_posterior = initial_posterior
times = kalman_posterior.locations
dataset = np.zeros((len(times), bvp.dimension))
# Create data and measmods
ode_measmod, left_measmod, right_measmod = self.choose_measurement_model(bvp)
measmod_list = self.create_measmod_list(
ode_measmod, left_measmod, right_measmod, times
)
filter_object = self.setup_filter_object(bvp)
linearise_at = kalman_posterior.state_rvs
acceptable_intervals = np.zeros(len(times[1:]), dtype=bool)
while np.any(np.logical_not(acceptable_intervals)):
# EM iterations
for _ in range(maxit_em):
# IEKS iterations
for _ in range(maxit_ieks):
lin_measmod_list = self.linearise_measmod_list(
measmod_list, linearise_at, times
)
kalman_posterior = filter_object.filtsmooth(
dataset=dataset, times=times, measmod_list=lin_measmod_list
)
sigmas = filter_object.sigmas
sigma_squared = np.mean(sigmas) / bvp.dimension
linearise_at = kalman_posterior.state_rvs
if yield_ieks_iterations:
yield kalman_posterior, sigma_squared
filter_object.initrv = self.update_initrv(
kalman_posterior, filter_object.initrv
)
yield kalman_posterior, sigma_squared
# Recalibrate diffusion
filter_object.initrv = self.update_covariances_with_sigma_squared(
filter_object.initrv, sigma_squared
)
candidate_nodes = construct_candidate_nodes(
current_mesh=times,
nodes_per_interval=self.error_estimator.quadrature_rule.nodes,
)
evaluated_posterior = kalman_posterior(candidate_nodes)
mm_list = [ode_measmod] * len(candidate_nodes)
(
per_interval_error,
acceptable,
) = self.error_estimator.estimate_error_per_interval(
evaluated_posterior,
candidate_nodes,
times,
sigma_squared,
ode_measmod_list=mm_list,
)
times, acceptable_intervals = refine_mesh(
current_mesh=times,
error_per_interval=per_interval_error,
localconvrate=self.localconvrate,
quadrature_nodes=self.error_estimator.quadrature_rule.nodes,
)
dataset = np.zeros((len(times), bvp.dimension))
measmod_list = self.create_measmod_list(
ode_measmod, left_measmod, right_measmod, times
)
linearise_at = kalman_posterior(times)
def setup_filter_object(self, bvp):
initrv_not_bridged = self.create_initrv()
initrv_not_bridged = self.update_covariances_with_sigma_squared(
initrv_not_bridged, self.initial_sigma_squared
)
filter_object = kalman.MyKalman(self.dynamics_model, None, initrv_not_bridged)
return filter_object
def create_initrv(self):
m0 = np.ones(self.dynamics_model.dimension)
c0 = self.initial_sigma_squared * np.ones(self.dynamics_model.dimension)
C0 = np.diag(c0)
initrv_not_bridged = random_variables.Normal(m0, C0, cov_cholesky=np.sqrt(C0))
return initrv_not_bridged
def update_covariances_with_sigma_squared(self, initrv_not_bridged, sigma_squared):
"""Include sigma into initial covariance and process noise."""
sigma = np.sqrt(sigma_squared)
self.dynamics_model.equivalent_discretisation_preconditioned._proc_noise_cov_cholesky *= (
sigma
)
self.dynamics_model.equivalent_discretisation_preconditioned.proc_noise_cov_mat *= (
sigma_squared
)
return initrv_not_bridged
def choose_measurement_model(self, bvp):
if isinstance(bvp, problems.SecondOrderBoundaryValueProblem):
ode_measmod = ode_measmods.from_second_order_ode(bvp, self.dynamics_model)
else:
ode_measmod = ode_measmods.from_ode(bvp, self.dynamics_model)
left_measmod, right_measmod = ode_measmods.from_boundary_conditions(
bvp, self.dynamics_model
)
return ode_measmod, left_measmod, right_measmod
def create_measmod_list(self, ode_measmod, left_measmod, right_measmod, times):
N = len(times)
if N < 3:
raise ValueError("Too few time steps")
measmod_list = [[left_measmod, ode_measmod]]
measmod_list.extend([ode_measmod] * (N - 2))
measmod_list.extend([[right_measmod, ode_measmod]])
return measmod_list
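    # For a grid of N points the list built above has the shape
    #   [[left_measmod, ode_measmod], ode_measmod, ..., ode_measmod,
    #    [right_measmod, ode_measmod]]
    # i.e. the boundary conditions are enforced jointly with the ODE
    # measurement at the first and last grid point.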
def linearise_measmod_list(self, measmod_list, states, times):
lin_measmod_list = [
mm.linearize(state) for (mm, state) in zip(measmod_list[1:-1], states[1:-1])
]
mm0 = measmod_list[0][0]
lm0 = measmod_list[0][1].linearize(states[0])
mm1 = measmod_list[-1][0]
lm1 = measmod_list[-1][1].linearize(states[-1])
lin_measmod_list.insert(0, [mm0, lm0])
lin_measmod_list.append([mm1, lm1])
return lin_measmod_list
def update_initrv(self, kalman_posterior, previous_initrv):
"""EM update for initial RV."""
inferred_initrv = kalman_posterior.states[0]
new_mean = inferred_initrv.mean
new_cov_cholesky = utils.linalg.cholesky_update(
inferred_initrv.cov_cholesky,
inferred_initrv.mean - previous_initrv.mean,
)
new_cov_cholesky += 1e-6 * np.eye(len(new_cov_cholesky))
new_cov = new_cov_cholesky @ new_cov_cholesky.T
return random_variables.Normal(
mean=new_mean, cov=new_cov, cov_cholesky=new_cov_cholesky
)
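    # In other words, the EM M-step above sets the new initial covariance to
    # the smoothed covariance inflated by the outer product of the mean shift
    # (done as a rank-1 update of the Cholesky factor), plus a small diagonal
    # jitter for numerical stability.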
#
# def estimate_squared_error(self, kalman_posterior, mesh_candidates, sigma_squared):
# evaluated_posterior = kalman_posterior(mesh_candidates)
#
# squared_error = evaluated_posterior.var * sigma_squared
# reference = evaluated_posterior.mean
# info = {"evaluated_posterior": evaluated_posterior}
# return squared_error, reference, info
# else:
# measmod_list = [[measmodfun(s=initial_guess[0])]]
# measmod_list.extend([measmodfun(s=d) for d in initial_guess[1:-1]])
# measmod_list.extend([[measmodfun(s=initial_guess[-1])]])
# return measmod_list
########################################################################
########################################################################
# Mesh refinement
########################################################################
########################################################################
#
# def insert_quadrature_nodes(mesh, nodes_per_interval, where):
# """Insert 5-pt Lobatto points into a mesh."""
# new_candidates = construct_candidate_nodes(mesh, nodes_per_interval, where)
# return np.union1d(mesh, new_candidates)
#
def refine_mesh(current_mesh, error_per_interval, localconvrate, quadrature_nodes):
"""Refine the mesh.
Examples
--------
>>> current_mesh = [0., 0.5, 1.0, 2.0]
>>> error_per_interval = [1000., 10., 0.1]
>>> localconvrate = 3.5
>>> quadrature_nodes = [0.3, 0.5, 0.7]
>>> new_mesh, acceptable = refine_mesh(current_mesh, error_per_interval, localconvrate, quadrature_nodes)
>>> print(new_mesh)
[0. 0.15 0.35 0.5 0.75 1. 2. ]
>>> print(acceptable)
[False False True]
"""
current_mesh = np.asarray(current_mesh)
error_per_interval = np.asarray(error_per_interval)
acceptable = error_per_interval < 1.0
if np.all(acceptable):
return current_mesh, acceptable
threshold_two_instead_of_one = 3.0 ** localconvrate
insert_one_here = np.logical_and(
1.0 <= error_per_interval, error_per_interval <= threshold_two_instead_of_one
)
insert_two_here = threshold_two_instead_of_one < error_per_interval
left_node, central_node, right_node = quadrature_nodes
one_inserted = construct_candidate_nodes(
current_mesh, [central_node], where=insert_one_here
)
two_inserted = construct_candidate_nodes(
current_mesh, [left_node, right_node], where=insert_two_here
)
new_mesh = functools.reduce(np.union1d, (current_mesh, one_inserted, two_inserted))
return new_mesh, acceptable
def construct_candidate_nodes(current_mesh, nodes_per_interval, where=None):
"""Construct nodes that are located in-between mesh points.
Examples
--------
>>> current_mesh = [0., 0.5, 1.0, 2.0]
>>> nodes_per_interval = [0.3, 0.5, 0.7]
>>> candidate_nodes = construct_candidate_nodes(current_mesh, nodes_per_interval)
>>> print(candidate_nodes)
[0.15 0.25 0.35 0.65 0.75 0.85 1.3 1.5 1.7 ]
>>> where = [True, False, False]
>>> candidate_nodes = construct_candidate_nodes(current_mesh, nodes_per_interval, where=where)
>>> print(candidate_nodes)
[0.15 0.25 0.35]
"""
current_mesh = np.asarray(current_mesh)
if where is None:
where = np.ones_like(current_mesh[1:], dtype=bool)
diff = np.diff(current_mesh)
new_mesh = []
for node in nodes_per_interval:
new_pts = current_mesh[:-1] + diff * node
new_mesh = np.union1d(new_mesh, new_pts[where])
return new_mesh
########################################################################
########################################################################
# Error estimation
########################################################################
########################################################################
class BVPErrorEstimator(abc.ABC):
def __init__(
self,
atol,
rtol,
quadrature_rule,
P0=None,
P1=None,
normalise_with_interval_size=False,
):
self.quadrature_rule = quadrature_rule
self.atol = atol
self.rtol = rtol
self.normalise_with_interval_size = normalise_with_interval_size
# Projection matrices: state to 0th/1st derivative.
self.P0 = P0
self.P1 = P1
def set_tolerance(self, atol, rtol):
self.atol = atol
self.rtol = rtol
def estimate_error_per_interval(
self,
evaluated_posterior,
mesh_candidates,
current_mesh,
calibrated_sigma_squared,
ode_measmod_list=None,
):
"""Estimate error per interval.
Numerically approximate the integrated error estimate per subinterval.
"""
assert self.quadrature_rule.order == 5
squared_error, reference, info = self.estimate_squared_error_at_points(
evaluated_posterior,
mesh_candidates,
calibrated_sigma_squared,
ode_measmod_list,
)
normalisation = (self.atol + self.rtol * np.abs(reference)) ** 2
normalised_squared_error = squared_error / normalisation
normalised_error = np.sqrt(normalised_squared_error)
dim = len(normalised_error[0])
integrand = np.linalg.norm(normalised_error, axis=1) ** 2 / dim
per_interval_error = np.abs(
integrand.reshape((-1, self.quadrature_rule.order - 2))
@ self.quadrature_rule.weights
)
if self.normalise_with_interval_size:
dt = np.diff(current_mesh)
return np.sqrt(per_interval_error / dt), info
return np.sqrt(per_interval_error), info
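    # Roughly, per subinterval with quadrature weights w_q and dimension d:
    #   err^2 ~ sum_q w_q * (1/d) * sum_i (e_i(t_q) / (atol + rtol*|ref_i(t_q)|))^2
    # optionally divided by the interval length before taking the square root.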
@abc.abstractmethod
def estimate_squared_error_at_points(
self, evaluated_posterior, points, calibrated_sigma_squared
):
raise NotImplementedError
class ErrorViaStandardDeviation(BVPErrorEstimator):
"""The posterior standard deviation is the error estimate.
Examples
--------
>>> from probnum._randomvariablelist import _RandomVariableList
>>> from probnum import random_variables
>>> from bvps import quadrature
>>> quadrule = quadrature.QuadratureRule(nodes=[0.3, 0.5, 0.6], weights=[1./3., 1./3., 1./3.], order=5)
>>> estimator = ErrorViaStandardDeviation(atol=0.5, rtol=0.5, quadrature_rule=quadrule)
>>> dummy_rv = random_variables.Normal(mean=np.ones(1), cov=np.eye(1))
>>> evaluated_posterior = _RandomVariableList([dummy_rv]*12)
>>> current_mesh = np.arange(0., 5., step=1.)
>>> mesh_candidates = construct_candidate_nodes(current_mesh, quadrule.nodes)
>>> calibrated_sigma_squared = 9
>>> error, _ = estimator.estimate_error_per_interval(evaluated_posterior, mesh_candidates,current_mesh, calibrated_sigma_squared)
>>> print(error)
[3. 3. 3. 3.]
>>> calibrated_sigma_squared = 100
>>> error, _ = estimator.estimate_error_per_interval(evaluated_posterior, mesh_candidates,current_mesh, calibrated_sigma_squared)
>>> print(error)
[10. 10. 10. 10.]
"""
# (1-10 GitHub stars)
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the business-filing end-point.
Test-Suite to ensure that the /businesses endpoint is working as expected.
"""
import asyncio
import json
from datetime import datetime
from http import HTTPStatus
import dpath.util
import pytest
from flask import current_app
from legal_api.services import QueueService
from legal_api.services.authz import BASIC_USER, COLIN_SVC_ROLE, STAFF_ROLE
from tests import integration_nats, integration_payment
from tests.unit.models import AR_FILING, factory_business, factory_business_mailing_address, factory_filing
from tests.unit.services.utils import create_header
def test_get_all_business_filings_only_one_in_ledger(session, client, jwt):
"""Assert that the business info can be received in a valid JSONSchema format."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
ar = copy.deepcopy(AR_FILING)
ar['filing']['header']['filingId'] = filings.id
ar['filing']['header']['colinId'] = None
print('test_get_all_business_filings - filing:', filings)
rv = client.get(f'/api/v1/businesses/{identifier}/filings',
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.OK
assert len(rv.json.get('filings')) == 0 # The endpoint will return only completed filings
def test_get_all_business_filings_multi_in_ledger(session, client, jwt):
"""Assert that the business info can be received in a valid JSONSchema format."""
import copy
from tests import add_years
ar = copy.deepcopy(AR_FILING)
identifier = 'CP7654321'
# create business
b = factory_business(identifier)
# add 3 filings, add a year onto the AGM date
for i in range(0, 3):
ar['filing']['annualReport']['annualGeneralMeetingDate'] = \
datetime.date(add_years(datetime(2001, 8, 5, 7, 7, 58, 272362), i)).isoformat()
factory_filing(b, ar)
rv = client.get(f'/api/v1/businesses/{identifier}/filings',
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.OK
assert len(rv.json.get('filings')) == 0
def test_get_one_business_filing_by_id(session, client, jwt):
"""Assert that the business info cannot be received in a valid JSONSchema format."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
ar = copy.deepcopy(AR_FILING)
ar['filing']['header']['filingId'] = filings.id
ar['filing']['header']['colinId'] = None
rv = client.get(f'/api/v1/businesses/{identifier}/filings/{filings.id}',
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.OK
assert rv.json['filing']['annualReport'] == ar['filing']['annualReport']
assert rv.json['filing']['business'] == ar['filing']['business']
def test_get_404_when_business_invalid_filing_id(session, client, jwt):
"""Assert that the business info cannot be received in a valid JSONSchema format."""
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
print('test_get_one_business_filing - filing:', filings)
print(f'/api/v1/businesses/{identifier}/filings/{filings.id}')
rv = client.get(f'/api/v1/businesses/{identifier}/filings/{filings.id + 1}',
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json == {'message': f'{identifier} no filings found'}
def test_get_404_filing_with_invalid_business(session, client, jwt):
"""Assert that a filing cannot be created against non-existent business."""
identifier = 'CP7654321'
filings_id = 1
rv = client.get(f'/api/v1/businesses/{identifier}/filings/{filings_id}',
headers=create_header(jwt, [STAFF_ROLE], identifier))
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json == {'message': f'{identifier} not found'}
def test_post_fail_if_given_filing_id(session, client, jwt):
"""Assert that a filing cannot be created against a given filing_id."""
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
rv = client.post(f'/api/v1/businesses/{identifier}/filings/{filings.id}',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.FORBIDDEN
assert rv.json['errors'][0] == {'message':
f'Illegal to attempt to create a duplicate filing for {identifier}.'}
def test_post_filing_no_business(session, client, jwt):
"""Assert that a filing cannot be created against non-existent business."""
identifier = 'CP7654321'
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json['errors'][0] == {'message': f'{identifier} not found'}
def test_post_empty_ar_filing_to_a_business(session, client, jwt):
"""Assert that an empty filing cannot be posted."""
identifier = 'CP7654321'
factory_business(identifier)
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=None,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'][0] == {'message': f'No filing json data in body of post for {identifier}.'}
def test_post_authorized_draft_ar(session, client, jwt):
"""Assert that a unpaid filing can be posted."""
identifier = 'CP7654321'
factory_business(identifier)
rv = client.post(f'/api/v1/businesses/{identifier}/filings?draft=true',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.CREATED
def test_post_not_authorized_draft_ar(session, client, jwt):
"""Assert that a unpaid filing can be posted."""
identifier = 'CP7654321'
factory_business(identifier)
rv = client.post(f'/api/v1/businesses/{identifier}/filings?draft=true',
json=AR_FILING,
headers=create_header(jwt, [BASIC_USER], 'WRONGUSER')
)
assert rv.status_code == HTTPStatus.UNAUTHORIZED
def test_post_draft_ar(session, client, jwt):
"""Assert that a unpaid filing can be posted."""
identifier = 'CP7654321'
factory_business(identifier)
rv = client.post(f'/api/v1/businesses/{identifier}/filings?draft=true',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.CREATED
assert not rv.json['filing']['header'].get('paymentToken')
assert rv.json['filing']['header']['filingId']
def test_post_only_validate_ar(session, client, jwt):
"""Assert that a unpaid filing can be posted."""
identifier = 'CP7654321'
factory_business(identifier)
rv = client.post(f'/api/v1/businesses/{identifier}/filings?only_validate=true',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.OK
assert not rv.json.get('errors')
def test_post_only_validate_error_ar(session, client, jwt):
"""Assert that a unpaid filing can be posted."""
import copy
identifier = 'CP7654321'
factory_business(identifier)
ar = copy.deepcopy(AR_FILING)
ar['filing']['header'].pop('name')
rv = client.post(f'/api/v1/businesses/{identifier}/filings?only_validate=true',
json=ar,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json.get('errors')
assert rv.json['errors'][0]['error'] == "'name' is a required property"
@integration_payment
def test_post_valid_ar(session, client, jwt):
"""Assert that a unpaid filing can be posted."""
from legal_api.models import Address, Filing
identifier = 'CP7654321'
business = factory_business(identifier)
mailing_address = Address(city='Test Mailing City', address_type=Address.MAILING)
business.mailing_address.append(mailing_address)
business.save()
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
# check return
assert rv.status_code == HTTPStatus.CREATED
assert not rv.json.get('errors')
assert rv.json['filing']['header']['filingId']
assert rv.json['filing']['header']['paymentToken']
assert rv.json['filing']['header']['paymentToken'] == '<PASSWORD>'
# check stored filing
filing = Filing.get_filing_by_payment_token(rv.json['filing']['header']['paymentToken'])
assert filing
assert filing.status == Filing.Status.PENDING.value
def test_post_valid_ar_failed_payment(monkeypatch, session, client, jwt):
"""Assert that a unpaid filing can be posted."""
identifier = 'CP7654321'
business = factory_business(identifier)
factory_business_mailing_address(business)
old_svc = current_app.config.get('PAYMENT_SVC_URL')
current_app.config['PAYMENT_SVC_URL'] = 'http://nowhere.localdomain'
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=AR_FILING,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
current_app.config['PAYMENT_SVC_URL'] = old_svc
assert rv.status_code == HTTPStatus.PAYMENT_REQUIRED
assert rv.json.get('errors')
assert rv.json['errors'][0]['message'] == 'unable to create invoice for payment.'
@integration_payment
def test_update_ar_filing_to_a_business(session, client, jwt):
"""Assert that a filing can be updated if not paid."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
factory_business_mailing_address(b)
filings = factory_filing(b, AR_FILING)
ar = copy.deepcopy(AR_FILING)
ar['filing']['header']['date'] = '2001-08-05'
rv = client.put(f'/api/v1/businesses/{identifier}/filings/{filings.id}',
json=ar,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
ar['filing']['header']['submitter'] = identifier
ar['filing']['header']['date'] = rv.json['filing']['header']['date']
assert rv.status_code == HTTPStatus.ACCEPTED
assert rv.json['filing']['business'] == ar['filing']['business']
assert rv.json['filing']['annualReport'] == ar['filing']['annualReport']
assert rv.json['filing']['header']['filingId']
assert rv.json['filing']['header']['submitter']
assert rv.json['filing']['header']['paymentToken']
def test_update_draft_ar(session, client, jwt):
"""Assert that a valid filing can be updated to a paid filing."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
ar = copy.deepcopy(AR_FILING)
rv = client.put(f'/api/v1/businesses/{identifier}/filings/{filings.id}?draft=true',
json=ar,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.ACCEPTED
assert rv.json['filing']['business'] == ar['filing']['business']
assert rv.json['filing']['annualReport'] == ar['filing']['annualReport']
assert not rv.json['filing']['header'].get('paymentToken')
assert rv.json['filing']['header']['filingId'] == filings.id
def test_update_block_ar_update_to_a_paid_filing(session, client, jwt):
"""Assert that a valid filing can NOT be updated once it has been paid."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
ar = copy.deepcopy(AR_FILING)
filings = factory_filing(b, ar)
filings.payment_token = 'token'
filings.save()
rv = client.put(f'/api/v1/businesses/{identifier}/filings/{filings.id}',
json=ar,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.FORBIDDEN
assert rv.json['errors'][0] == {'error': 'Filings cannot be changed after the invoice is created.'}
def test_update_ar_with_a_missing_filing_id_fails(session, client, jwt):
"""Assert that updating a missing filing fails."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
ar = copy.deepcopy(AR_FILING)
ar['filing']['header']['paymentToken'] = 'token'
rv = client.put(f'/api/v1/businesses/{identifier}/filings/{filings.id+1}',
json=ar,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json['errors'][0] == {'message': f'{identifier} no filings found'}
def test_update_ar_with_a_missing_business_id_fails(session, client, jwt):
"""Assert that updating to a non-existant business fails."""
import copy
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
ar = copy.deepcopy(AR_FILING)
ar['filing']['header']['paymentToken'] = 'token'
identifier = 'CP0000001'
rv = client.put(f'/api/v1/businesses/{identifier}/filings/{filings.id+1}',
json=ar,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json['errors'][0] == {'message': f'{identifier} not found'}
def test_update_ar_with_missing_json_body_fails(session, client, jwt):
"""Assert that updating a filing with no JSON body fails."""
identifier = 'CP7654321'
b = factory_business(identifier)
filings = factory_filing(b, AR_FILING)
rv = client.put(f'/api/v1/businesses/{identifier}/filings/{filings.id+1}',
json=None,
headers=create_header(jwt, [STAFF_ROLE], identifier)
)
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'][0] == {'message': f'No filing json data in body of post for {identifier}.'}
# @integration_nats
# @pytest.mark.asyncio
# async def test_colin_filing_to_queue(app_ctx, session, client, jwt, stan_server, event_loop):
# """Assert that payment tokens can be retrieved and decoded from the Queue."""
# import copy
# # SETUP
# msgs = []
# this_loop = asyncio.get_event_loop()
# # this_loop = event_loop
# future = asyncio.Future(loop=this_loop)
# queue = QueueService(app_ctx, this_loop)
# await queue.connect()
# async def cb(msg):
# nonlocal msgs
# nonlocal future
# msgs.append(msg)
# if len(msgs) == 5:
# future.set_result(True)
# await queue.stan.subscribe(subject=queue.subject,
# queue='colin_queue',
# durable_name='colin_queue',
# cb=cb)
# # TEST - add some COLIN filings to the system, check that they got placed on the Queue
# for i in range(0, 5):
# # Create business
# identifier = f'CP765432{i}'
# b = factory_business(identifier)
# factory_business_mailing_address(b)
# # Create an AR filing for the business
# ar = copy.deepcopy(AR_FILING)
# ar['filing']['header']['colinId'] = 1230 + i
# ar['filing']['business']['identifier'] = identifier
# # POST the AR
# rv = client.post(f'/api/v1/businesses/{identifier}/filings',
# json=ar,
# headers=create_header(jwt, [COLIN_SVC_ROLE], 'colin_service')
# )
# Source file: financepy/products/equity/FinEquityBarrierOption.py (repo: suhasghorp/FinancePy)
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 16:51:05 2016
@author: <NAME>
"""
#
from math import exp, log, sqrt
import numpy as np
from enum import Enum
from ...finutils.FinError import FinError
from ...finutils.FinMath import N
from ...finutils.FinGlobalVariables import gDaysInYear
from ...products.equity.FinEquityOption import FinEquityOption
from ...models.FinProcessSimulator import FinProcessSimulator
class FinEquityBarrierTypes(Enum):
DOWN_AND_OUT_CALL = 1
DOWN_AND_IN_CALL = 2
UP_AND_OUT_CALL = 3
UP_AND_IN_CALL = 4
UP_AND_OUT_PUT = 5
UP_AND_IN_PUT = 6
DOWN_AND_OUT_PUT = 7
DOWN_AND_IN_PUT = 8
###############################################################################
##########################################################################
class FinEquityBarrierOption(FinEquityOption):
''' Class to hold details of an Equity Barrier Option. It also
calculates the option price using Black Scholes for 8 different
variants on the Barrier structure in enum FinEquityBarrierTypes. '''
def __init__(self,
expiryDate,
strikePrice,
optionType,
barrierLevel,
numObservationsPerYear,
notional = 1.0):
self._expiryDate = expiryDate
self._strikePrice = float(strikePrice)
self._barrierLevel = float(barrierLevel)
self._numObservationsPerYear = int(numObservationsPerYear)
if optionType not in FinEquityBarrierTypes:
raise FinError("Option Type ", optionType, " unknown.")
self._optionType = optionType
self._notional = notional
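# Illustrative usage sketch (the date, discount curve and volatility model
# objects are assumptions taken from elsewhere in the library, not this file):
#   option = FinEquityBarrierOption(expiryDate, 100.0,
#                                   FinEquityBarrierTypes.DOWN_AND_OUT_CALL,
#                                   80.0, 252)
#   v = option.value(valueDate, 95.0, discountCurve, 0.02, model)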
##########################################################################
def value(
self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
# This prices the option using the formulae given in the paper
# by Clewlow, Llanos and Strickland December 1994 which can be found at
# https://warwick.ac.uk/fac/soc/wbs/subjects/finance/research/wpaperseries/1994/94-54.pdf
t = (self._expiryDate - valueDate) / gDaysInYear
lnS0k = log(float(stockPrice) / self._strikePrice)
sqrtT = sqrt(t)
df = discountCurve.df(t)
r = -np.log(df)/t
k = self._strikePrice
s = stockPrice
h = self._barrierLevel
volatility = model._volatility
sigmaRootT = volatility * sqrtT
v2 = volatility * volatility
mu = r - dividendYield
d1 = (lnS0k + (mu + v2 / 2.0) * t) / sigmaRootT
d2 = (lnS0k + (mu - v2 / 2.0) * t) / sigmaRootT
df = exp(-r * t)
dq = exp(-dividendYield * t)
c = s * dq * N(d1) - k * df * N(d2)
p = k * df * N(-d2) - s * dq * N(-d1)
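# c and p are the vanilla Black-Scholes call and put values; the knock-in and
# knock-out prices below are assembled from these building blocks.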
# print("CALL:",c,"PUT:",p)
if self._optionType == FinEquityBarrierTypes.DOWN_AND_OUT_CALL and s <= h:
return 0.0
elif self._optionType == FinEquityBarrierTypes.UP_AND_OUT_CALL and s >= h:
return 0.0
elif self._optionType == FinEquityBarrierTypes.UP_AND_OUT_PUT and s >= h:
return 0.0
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_OUT_PUT and s <= h:
return 0.0
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_IN_CALL and s <= h:
return c
elif self._optionType == FinEquityBarrierTypes.UP_AND_IN_CALL and s >= h:
return c
elif self._optionType == FinEquityBarrierTypes.UP_AND_IN_PUT and s >= h:
return p
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_IN_PUT and s <= h:
return p
numObservations = t * self._numObservationsPerYear
# Correction by Broadie, <NAME> Kou, Mathematical Finance, 1997
# Adjusts the barrier for discrete and not continuous observations
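# The factor 0.5826 approximates -zeta(1/2)/sqrt(2*pi); the barrier is shifted
# away from the spot (up for up-barriers, down for down-barriers) so that the
# continuous-monitoring formula approximates the discretely observed barrier.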
h_adj = h
if self._optionType == FinEquityBarrierTypes.DOWN_AND_OUT_CALL:
h_adj = h * exp(-0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_IN_CALL:
h_adj = h * exp(-0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.UP_AND_IN_CALL:
h_adj = h * exp(0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.UP_AND_OUT_CALL:
h_adj = h * exp(0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.UP_AND_IN_PUT:
h_adj = h * exp(0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.UP_AND_OUT_PUT:
h_adj = h * exp(0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_OUT_PUT:
h_adj = h * exp(-0.5826 * volatility * sqrt(t / numObservations))
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_IN_PUT:
h_adj = h * exp(-0.5826 * volatility * sqrt(t / numObservations))
else:
raise FinError("Unknown barrier option type." +
str(self._optionType))
h = h_adj
if abs(volatility) < 1e-5:
volatility = 1e-5
l = (mu + v2 / 2.0) / v2
y = log(h * h / (s * k)) / sigmaRootT + l * sigmaRootT
x1 = log(s / h) / sigmaRootT + l * sigmaRootT
y1 = log(h / s) / sigmaRootT + l * sigmaRootT
hOverS = h / s
if self._optionType == FinEquityBarrierTypes.DOWN_AND_OUT_CALL:
if h >= k:
c_do = s * dq * N(x1) - k * df * N(x1 - sigmaRootT) \
- s * dq * pow(hOverS, 2.0 * l) * N(y1) \
+ k * df * pow(hOverS, 2.0 * l - 2.0) * N(y1 - sigmaRootT)
price = c_do
else:
c_di = s * dq * pow(hOverS, 2.0 * l) * N(y) \
- k * df * pow(hOverS, 2.0 * l - 2.0) * N(y - sigmaRootT)
price = c - c_di
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_IN_CALL:
if h <= k:
c_di = s * dq * pow(hOverS, 2.0 * l) * N(y) \
- k * df * pow(hOverS, 2.0 * l - 2.0) * N(y - sigmaRootT)
price = c_di
else:
c_do = s * dq * N(x1) \
- k * df * N(x1 - sigmaRootT) \
- s * dq * pow(hOverS, 2.0 * l) * N(y1) \
+ k * df * pow(hOverS, 2.0 * l - 2.0) * N(y1 - sigmaRootT)
price = c - c_do
elif self._optionType == FinEquityBarrierTypes.UP_AND_IN_CALL:
if h >= k:
c_ui = s * dq * N(x1) - k * df * N(x1 - sigmaRootT) \
- s * dq * pow(hOverS, 2.0 * l) * (N(-y) - N(-y1)) \
+ k * df * pow(hOverS, 2.0 * l - 2.0) * (N(-y + sigmaRootT) - N(-y1 + sigmaRootT))
price = c_ui
else:
price = c
elif self._optionType == FinEquityBarrierTypes.UP_AND_OUT_CALL:
if h > k:
c_ui = s * dq * N(x1) - k * df * N(x1 - sigmaRootT) \
- s * dq * pow(hOverS, 2.0 * l) * (N(-y) - N(-y1)) \
+ k * df * pow(hOverS, 2.0 * l - 2.0) * (N(-y + sigmaRootT) - N(-y1 + sigmaRootT))
price = c - c_ui
else:
price = 0.0
elif self._optionType == FinEquityBarrierTypes.UP_AND_IN_PUT:
if h > k:
p_ui = -s * dq * pow(hOverS, 2.0 * l) * N(-y) \
+ k * df * pow(hOverS, 2.0 * l - 2.0) * N(-y + sigmaRootT)
price = p_ui
else:
p_uo = -s * dq * N(-x1) \
+ k * df * N(-x1 + sigmaRootT) \
+ s * dq * pow(hOverS, 2.0 * l) * N(-y1) \
- k * df * pow(hOverS, 2.0 * l - 2.0) * N(-y1 + sigmaRootT)
price = p - p_uo
elif self._optionType == FinEquityBarrierTypes.UP_AND_OUT_PUT:
if h >= k:
p_ui = -s * dq * pow(hOverS, 2.0 * l) * N(-y) \
+ k * df * pow(hOverS, 2.0 * l - 2.0) * N(-y + sigmaRootT)
price = p - p_ui
else:
p_uo = -s * dq * N(-x1) \
+ k * df * N(-x1 + sigmaRootT) \
+ s * dq * pow(hOverS, 2.0 * l) * N(-y1) \
- k * df * pow(hOverS, 2.0 * l - 2.0) * N(-y1 + sigmaRootT)
price = p_uo
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_OUT_PUT:
if h >= k:
price = 0.0
else:
p_di = -s * dq * N(-x1) \
+ k * df * N(-x1 + sigmaRootT) \
+ s * dq * pow(hOverS, 2.0 * l) * (N(y) - N(y1)) \
- k * df * pow(hOverS, 2.0 * l - 2.0) * (N(y - sigmaRootT) - N(y1 - sigmaRootT))
price = p - p_di
elif self._optionType == FinEquityBarrierTypes.DOWN_AND_IN_PUT:
if h >= k:
price = p
else:
p_di = -s * dq * N(-x1) \
+ k * df * N(-x1 + sigmaRootT) \
+ s * dq * pow(hOverS, 2.0 * l) * (N(y) - N(y1)) \
- k * df * pow(hOverS, 2.0 * l - 2.0) * (N(y - sigmaRootT) - N(y1 - sigmaRootT))
price = p_di
else:
raise FinError("Unknown barrier option type." +
str(self._optionType))
v = price * self._notional
return v
###############################################################################
def valueMC(
self,
valueDate,
stockPrice,
discountCurve,
processType,
modelParams,
numAnnSteps=252,
numPaths=10000,
seed=4242):
t = (self._expiryDate - valueDate) / gDaysInYear
numTimeSteps = int(t * numAnnSteps)
K = self._strikePrice
B = self._barrierLevel
optionType = self._optionType
process = FinProcessSimulator()
df = discountCurve.df(t)
r = -np.log(df)/t
#######################################################################
if optionType == FinEquityBarrierTypes.DOWN_AND_OUT_CALL and stockPrice <= B:
return 0.0
elif optionType == FinEquityBarrierTypes.UP_AND_OUT_CALL and stockPrice >= B:
return 0.0
elif optionType == FinEquityBarrierTypes.DOWN_AND_OUT_PUT and stockPrice <= B:
return 0.0
for row in rows:
rows_list.append(row)
for row in rows_list:
fueltype_str = row[0]
fueltype_int = fueltypes_lu[fueltype_str]
for cnt, entry in enumerate(row[1:], 1):
enduse = headings[cnt]
sector = _secondline[cnt]
fuels[enduse][sector][fueltype_int] += float(entry)
except ValueError:
raise Exception(
"The service sector fuel could not be loaded. Check if empty cells.")
return fuels, sorted(sectors), sorted(enduses)
def read_load_shapes_tech(path_to_csv):
"""This function reads in csv technology shapes
Arguments
----------
path_to_csv : str
Path to csv file
"""
load_shapes_dh = {}
with open(path_to_csv, 'r') as csvfile:
rows = csv.reader(csvfile, delimiter=',')
headings = next(rows) # Skip first row
for row in rows:
dh_shape = np.zeros((24), dtype="float")
for cnt, row_entry in enumerate(row[1:], 1):
dh_shape[int(headings[cnt])] = float(row_entry)
load_shapes_dh[str(row[0])] = dh_shape
return load_shapes_dh
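# Illustrative csv layout expected by read_load_shapes_tech() (values are made
# up; the header holds the hour of day, the first column the technology name):
#
#   technology, 0,    1,    ...,  23
#   boiler_gas, 0.02, 0.01, ..., 0.04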
def service_switch(df_service_switches):
"""This function reads in service assumptions from csv file,
tests whether the maximum defined switch is larger than
possible for a technology.
Arguments
----------
path_to_csv : str
Path to csv file
technologies : list
All technologies
Returns
-------
enduse_tech_ey_p : dict
Technologies per enduse for endyear in p
service_switches : dict
Service switches
Notes
-----
The base year service shares are generated from technology stock definition
Info
-----
The following attributes need to be defined for a service switch.
Attribute Description
========== =========================
enduse [str] Enduse affected by switch
tech [str] Technology
switch_yr [int] Year until switch is fully realised
service_share_ey [str] Service share of 'tech' in 'switch_yr'
sector [str] Optional sector specific info where switch applies
"""
test_enduses = set([])
service_switches = []
default_parameter = 999.0 #default parameter
for i in df_service_switches.index:
enduse = df_service_switches.at[i, 'enduses_service_switch']
test_enduses.add(enduse)
tech = df_service_switches.at[i, 'tech']
service_share_ey = df_service_switches.at[i, 'switches_service']
switch_yr = df_service_switches.at[i, 'end_yr']
sector = df_service_switches.at[i, 'sector']
if sector == 'None':
sector = None
if float(service_share_ey) == default_parameter:
pass
else:
service_switches.append(
ServiceSwitch(
enduse=str(enduse),
technology_install=str(tech),
service_share_ey=float(service_share_ey),
switch_yr=float(switch_yr),
sector=sector))
# --------------------------------------------
# Test if not 100% per enduse is defined
# --------------------------------------------
for enduse in test_enduses:
switch_yrs = {}
for switch in service_switches:
if switch.enduse == enduse:
year = switch.switch_yr
value = switch.service_share_ey
#print("... {} {} {}".format(year, value, enduse))
if year in switch_yrs.keys():
switch_yrs[year] += value
else:
switch_yrs[year] = value
for year, value in switch_yrs.items():
if value != 1.0:
raise Exception("WRONG SERVICE SWITHC INPUT AS NOT SUMS TO 1.0 (100%) {} {} {}".format(value, year, enduse))
return service_switches
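# Minimal sketch of an input dataframe for service_switch(); the column names
# come from the code above, the values are purely illustrative. Rows whose
# share equals the default parameter 999.0 are skipped.
#
#   import pandas as pd
#   df = pd.DataFrame({
#       'enduses_service_switch': ['heating', 'heating'],
#       'tech': ['boiler_gas', 'heat_pump'],
#       'switches_service': [0.4, 0.6],   # must sum to 1.0 per enduse and year
#       'end_yr': [2050, 2050],
#       'sector': ['None', 'None']})
#   switches = service_switch(df)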
def read_fuel_switches(
path_to_csv,
enduses,
fueltypes,
technologies,
base_yr=2015
):
"""This function reads in from CSV file defined fuel
switch assumptions
Arguments
----------
path_to_csv : str
Path to csv file
enduses : dict
Endues per submodel
fueltypes : dict
Look-ups
technologies : dict
Technologies
Returns
-------
dict_with_switches : dict
All assumptions about fuel switches provided as input
Info
-----
The following attributes need to be defined for a fuel switch.
Attribute Description
========== =========================
enduse [str] Enduse affected by switch
fueltype_replace [str] Fueltype to be switched from
technology_install [str] Technology which is installed
switch_yr [int] Year until switch is fully realised
fuel_share_switched_ey [float] Share of fuel which is switched until switch_yr
sector [str] Optional sector specific info where switch applies
If field is empty the switch is across all sectors
"""
fuel_switches = []
if os.path.isfile(path_to_csv):
raw_csv_file = pd.read_csv(path_to_csv)
for index, row in raw_csv_file.iterrows():
fuel_switches.append(
FuelSwitch(
enduse=str(row['enduse']),
fueltype_replace=fueltypes[str(row['fueltype_replace'])],
technology_install=str(row['technology_install']),
switch_yr=float(row['switch_yr']),
fuel_share_switched_ey=float(row['fuel_share_switched_ey']),
sector=row['sector']))
# -------
# Testing
#
# Test if more than 100% per fueltype is switched or more than
# is theoretically possible per technology
# --------
# Testing whether the provided inputs make sense
for obj in fuel_switches:
if obj.fuel_share_switched_ey == 0:
raise Exception(
"Input error: The share of switched fuel must be > 0. Delete {} from input".format(
obj.technology_install))
for obj_iter in fuel_switches:
# Test if larger than maximum defined technology diffusion (L)
if obj_iter.fuel_share_switched_ey > technologies[obj_iter.technology_install].tech_max_share:
raise Exception(
"Configuration Error: More service provided for tech '{}' in enduse '{}' than max possible".format(
obj_iter.enduse, obj_iter.technology_install))
if obj_iter.fuel_share_switched_ey > 1.0:
raise Exception(
"Configuration Error: The fuel switches are > 1.0 for enduse {} and fueltype {}".format(
obj_iter.enduse, obj_iter.fueltype_replace))
if obj_iter.switch_yr <= base_yr:
raise Exception("Configuration Error of fuel switch: switch_yr must be in the future")
# Test whether defined enduse exist
for obj in fuel_switches:
if obj.enduse in enduses['service'] or obj.enduse in enduses['residential'] or obj.enduse in enduses['industry']:
pass
else:
raise Exception(
"Input Error: The defined enduse '{}' to switch fuel from is not defined...".format(
obj.enduse))
else:
pass
return fuel_switches
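# Illustrative csv content for read_fuel_switches(); the column names are taken
# from the code above, the values are made up:
#
#   enduse,  fueltype_replace, technology_install, switch_yr, fuel_share_switched_ey, sector
#   heating, gas,              heat_pump,          2050,      0.5,                    None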
def read_technologies(path_to_csv):
"""Read in technology definition csv file. Append
for every technology type a 'placeholder_tech'.
Arguments
----------
path_to_csv : str
Path to csv file
Returns
-------
dict_technologies : dict
All technologies and their assumptions provided as input
dict_tech_lists : dict
List with technologies. The technology type
is defined in the technology input file. A placeholder technology
is added for every list in order to allow that a generic
technology type can be added for every enduse
Info
-----
The following attributes need to be defined for implementing
a technology.
Attribute Description
========== =========================
technology [str] Name of technology
fueltype [str] Fueltype of technology
eff_by [float] Efficiency in base year
eff_ey [float] Efficiency in future end year
year_eff_ey [int] Future year where efficiency is fully reached
eff_achieved [float] Factor of how much of the efficiency
is achieved (overwritten by scenario input)
This is set to 1.0 as default for initial
technology class generation
diff_method [str] Diffusion method (sigmoid or linear)
market_entry [int] Year of market entry of technology
tech_list [str] Definition of to which group
of technologies a technology belongs
tech_max_share [float] Maximum share of technology related
energy service which can be reached in theory
description [str] Optional technology description
"""
dict_technologies = {}
dict_tech_lists = {}
raw_csv_file = pd.read_csv(path_to_csv)
for index, row in raw_csv_file.iterrows():
dict_technologies[str(row['technology'])] = TechnologyData(
name=str(row['technology']),
fueltype=str(row['fueltype']),
eff_by=float(row['efficiency in base year']),
eff_ey=float(row['efficiency in future year']),
year_eff_ey=float(row['year when efficiency is fully realised']),
eff_achieved=1.0, # Set to one as default
diff_method=str(row['diffusion method (sigmoid or linear)']),
market_entry=float(row['market_entry']),
tech_type=str(row['technology type']),
tech_max_share=float(row['maximum theoretical service share of technology']),
description=str(row['description']))
try:
dict_tech_lists[row['technology type']].append(row['technology'])
except KeyError:
dict_tech_lists[row['technology type']] = [row['technology']]
# Add placeholder technology to all tech_lists
for tech_list in dict_tech_lists.values():
tech_list.append('placeholder_tech')
return dict_technologies, dict_tech_lists
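# Illustrative csv header for read_technologies(); these column names are the
# ones accessed in the code above (any example values would be assumptions):
#
#   technology, fueltype, efficiency in base year, efficiency in future year,
#   year when efficiency is fully realised, diffusion method (sigmoid or linear),
#   market_entry, technology type, maximum theoretical service share of technology,
#   description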
def read_fuel_rs(path_to_csv):
"""This function reads in base_data_CSV all fuel types
(first row is fueltype, subkey), header is appliances
Arguments
----------
path_to_csv : str
Path to csv file
_dt : str
Defines dtype of array to be read in (takes float)
Returns
-------
fuels : dict
Residential fuels
enduses : list
Residential end uses
Notes
-----
the first column is the fuel_id
The header contains the enduses (sub_keys)
"""
dummy_sector = None
sectors = [dummy_sector]
fuels = {}
# Read csv
raw_csv_file = pd.read_csv(path_to_csv)
# Replace NaN with " " values
raw_csv_file = raw_csv_file.fillna(0)
# Enduses
enduses = list(raw_csv_file.columns[1:].values) #skip fuel_id
# Replace str fueltypes with int fueltypes
raw_csv_file['fuel_id'] = raw_csv_file['fuel_id'].apply(tech_related.get_fueltype_int)
# Iterate columns and convert to array
for enduse in raw_csv_file.columns[1:]: # skip fuel_id column
fuels[enduse] = {}
fuels[enduse][dummy_sector] = raw_csv_file[enduse].values
return fuels, sectors, list(enduses)
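# Illustrative csv layout for read_fuel_rs(); one row per fueltype, one column
# per enduse, values are made up:
#
#   fuel_id,     lighting, cooking, wet
#   electricity, 10.5,     3.2,     6.1
#   gas,         0.0,      5.4,     0.0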
def read_fuel_is(path_to_csv, fueltypes_nr):
"""This function reads in base_data_CSV all fuel types
Arguments
----------
path_to_csv : str
Path to csv file
fueltypes_nr : int
Number of fueltypes
Returns
-------
fuels : dict
Industry fuels
sectors : list
Industral sectors
enduses : list
Industrial enduses
Info
----
Source: User Guide Energy Consumption in the UK
https://www.gov.uk/government/uploads/system/uploads/attachment_data/file/573271/ECUK_user_guide_November_2016_final.pdf
https://unstats.un.org/unsd/cr/registry/regcst.asp?Cl=27
http://ec.europa.eu/eurostat/ramon/nomenclatures/index.cfm?TargetUrl=LST_NOM_DTL&StrNom=NACE_REV2&StrLanguageCode=EN&IntPcKey=&StrLayoutCode=
High temperature processes
=============================
High temperature processing dominates energy consumption in the iron and steel,
non-ferrous metal, bricks, cement, glass and potteries industries. This includes
- coke ovens
- blast furnaces and other furnaces
- kilns and
- glass tanks.
Low temperature processes
=============================
Low temperature processes are the largest end use of energy for the food, drink
and tobacco industry. This includes:
- process heating and distillation in the chemicals sector;
- baking and separation processes in food and drink;
- pressing and drying processes, in paper manufacture;
- and washing, scouring, dyeing and drying in the textiles industry.
Drying/separation
=============================
Drying and separation is important in paper-making while motor processes are used
more in the manufacture of chemicals and chemical products than in any other
individual industry.
Motors
=============================
This
[107.631592,-6.99563],
[107.631622,-6.99599],
[107.63163,-6.99629],
[107.631638,-6.99639],
[107.631683,-6.99648],
[107.631737,-6.99658],
[107.632019,-6.99672],
[107.632507,-6.99689],
[107.632988,-6.99708],
[107.633209,-6.99721],
[107.633423,-6.99735],
[107.63353,-6.99746],
[107.633636,-6.99761],
[107.633667,-6.99777],
[107.633659,-6.998],
[107.633629,-6.99825],
[107.63356,-6.99851],
[107.633331,-6.99903],
[107.633217,-6.99926],
[107.633148,-6.99948],
[107.633072,-6.99966],
[107.633049,-6.99976],
[107.633057,-6.99988],
[107.633057,-7],
[107.633179,-7.0001],
[107.633331,-7.00016],
[107.633553,-7.00022],
[107.633911,-7.00024],
[107.634323,-7.00026],
[107.634682,-7.00025],
[107.634972,-7.00032],
[107.635246,-7.0005],
[107.63546,-7.00071],
[107.635689,-7.00112],
[107.63591,-7.00144],
[107.636078,-7.00157],
[107.63633,-7.00173],
[107.636726,-7.00191],
[107.637108,-7.0021],
[107.637611,-7.00238],
[107.638062,-7.00267],
[107.638474,-7.00308],
[107.638718,-7.00338],
[107.63903,-7.00367],
[107.639412,-7.004],
[107.639877,-7.00439],
[107.640427,-7.0048],
[107.640762,-7.00507],
[107.640984,-7.00537],
[107.641281,-7.00585],
[107.641586,-7.0063],
[107.641777,-7.0065],
[107.641899,-7.006569],
[107.642029,-7.00665],
[107.642227,-7.00672],
[107.642502,-7.00674],
[107.642906,-7.00676],
[107.643272,-7.00673],
[107.643532,-7.006766],
[107.643806,-7.006852],
[107.644028,-7.006895],
[107.644142,-7.006952],
[107.644196,-7.007088],
[107.644249,-7.007274],
[107.644188,-7.007704],
[107.644211,-7.008054],
[107.644226,-7.008349],
[107.644302,-7.008588],
[107.644432,-7.008828],
[107.644669,-7.008976],
[107.644913,-7.008791],
[107.645035,-7.008644],
[107.64518,-7.008411],
[107.645409,-7.008225],
[107.645538,-7.008103],
[107.645699,-7.008053],
[107.645836,-7.008039],
[107.645927,-7.00812],
[107.646042,-7.00819],
[107.64608,-7.008411],
[107.646095,-7.008618],
[107.646111,-7.008947],
[107.646141,-7.009355],
[107.646141,-7.009583],
[107.646217,-7.010048],
[107.646309,-7.010277],
[107.6464,-7.010392],
[107.646454,-7.010456],
[107.646561,-7.010506],
[107.646698,-7.01053],
[107.646828,-7.010492],
[107.646896,-7.010385],
[107.646927,-7.010191],
[107.647011,-7.00987],
[107.647125,-7.009419],
[107.647232,-7.008947],
[107.647354,-7.008497],
[107.647545,-7.008504],
[107.647705,-7.008511],
[107.648041,-7.00849],
[107.648598,-7.00837],
[107.649017,-7.00827],
[107.64962,-7.00815],
[107.650253,-7.00801],
[107.650787,-7.00791],
[107.651428,-7.0078],
[107.652138,-7.00764],
[107.652489,-7.00753],
[107.652771,-7.00746],
[107.652946,-7.00745],
[107.653122,-7.00747],
[107.653221,-7.00762],
[107.653236,-7.0078],
[107.653229,-7.00809],
[107.653221,-7.00831],
[107.653221,-7.0088],
[107.653229,-7.0092],
[107.653229,-7.00935],
[107.653229,-7.00949],
[107.653229,-7.00993],
[107.653259,-7.01026],
[107.653313,-7.01041],
[107.653473,-7.01062],
[107.653709,-7.01079],
[107.654083,-7.01097],
[107.654388,-7.01108],
[107.654762,-7.01119],
[107.65506,-7.01119],
[107.655289,-7.01118],
[107.655647,-7.01114],
[107.6558,-7.01109],
[107.656052,-7.01103],
[107.656281,-7.01092],
[107.656509,-7.0108],
[107.656593,-7.01071],
[107.656731,-7.01053],
[107.656807,-7.01037],
[107.656853,-7.01015],
[107.656853,-7.00971],
[107.656807,-7.00936],
[107.656761,-7.00878],
[107.656738,-7.00851],
[107.6567,-7.00818],
[107.656639,-7.00781],
[107.656639,-7.00742],
[107.656647,-7.00711],
[107.656677,-7.00693],
[107.656738,-7.00676],
[107.656799,-7.00664],
[107.656937,-7.00654],
[107.657158,-7.00648],
[107.657356,-7.00643],
[107.657761,-7.00639],
[107.658058,-7.00634],
[107.658302,-7.0063],
[107.65847,-7.00626],
[107.658699,-7.00616],
[107.658813,-7.00604],
[107.658913,-7.00584],
[107.659012,-7.00558],
[107.65905,-7.00526],
[107.659088,-7.00478],
[107.659088,-7.00434],
[107.65905,-7.00383],
[107.659058,-7.00339],
[107.659073,-7.00306],
[107.659073,-7.00276],
[107.659088,-7.00258],
[107.659157,-7.00243],
[107.659279,-7.00233],
[107.659569,-7.00228],
[107.659897,-7.00226],
[107.660217,-7.00221],
[107.660378,-7.0022],
[107.660591,-7.00217],
[107.66082,-7.00205],
]])
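# Each jalanDesa* method below appears to add one named road: record(nama)
# writes the attribute record and line([[...]]) the polyline geometry (a
# pyshp-style Writer API is assumed); the coordinates are [longitude, latitude]
# pairs.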
def jalanDesaBojongsari2(self, nama):
self.jalan.record(nama)
self.jalan.line(
[[
[107.66691,-6.9988],
[107.66697,-6.99881],
[107.66771,-6.99884],
[107.66933,-6.999],
[107.6694,-6.99899],
[107.66946,-6.99896],
[107.66984,-6.99882],
[107.66992,-6.99878],
[107.67055,-6.99858],
[107.67059,-6.99856],
[107.67065,-6.9985],
[107.67069,-6.99843],
[107.67075,-6.99811],
[107.67078,-6.99804],
[107.67079,-6.99803],
[107.6708,-6.99801],
[107.67103,-6.99781],
[107.67136,-6.99757],
[107.67154,-6.99741],
[107.67192,-6.99759],
[107.67179,-6.99788],
[107.67138,-6.99862],
[107.67123,-6.99881],
[107.67111,-6.99889],
[107.67084,-6.99894],
[107.66956,-6.99908],
[107.6695,-6.99908],
[107.66933,-6.99912],
[107.6693,-6.99914],
[107.66921,-6.99917],
[107.66914,-6.99921],
[107.66893,-6.99938],
[107.66881,-6.99953],
[107.66753,-7.00075],
[107.66748,-7.00077],
[107.66744,-7.00077],
[107.66742,-7.00075],
[107.66741,-7.00072],
[107.66741,-7.00065],
[107.66747,-7.00052],
[107.66837,-6.99891],
[107.66933,-6.999],
[107.66936,-6.99812],
[107.66933,-6.999],
[107.66771,-6.99884],
[107.66713,-6.99882],
[107.66508,-6.99867],
[107.6642,-6.99864],
[107.66411,-6.99866],
[107.66409,-6.99867],
[107.66407,-6.99869],
[107.66405,-6.99874],
[107.66401,-6.9992],
[107.66399,-6.99926],
[107.66399,-6.99927],
[107.66398,-6.99928],
[107.66393,-6.99931],
[107.6639,-6.99931],
[107.66052,-6.99874],
[107.6604,-6.99874],
[107.66037,-6.99875],
[107.66036,-6.99875],
[107.66031,-6.99879],
[107.6603,-6.99882],
[107.66024,-6.99891],
[107.66022,-6.99892],
[107.6602,-6.99894],
[107.66018,-6.99895],
[107.66014,-6.99896],
[107.66007,-6.99903],
[107.66002,-6.99914],
[107.65954,-7.00047],
[107.65949,-7.00055],
[107.6593,-7.00074],
[107.65916,-7.00086],
[107.6591,-7.00089],
[107.65904,-7.0009],
[107.65899,-7.00089],
[107.65892,-7.00089],
[107.65879,-7.00086],
[107.65873,-7.00087],
[107.65868,-7.00089],
[107.65859,-7.001],
[107.65853,-7.00111],
[107.65807,-7.00213],
[107.65777,-7.00296],
[107.65746,-7.00362],
[107.6569,-7.00347],
[107.65689,-7.00346],
[107.65557,-7.00308],
[107.6469,-7.0003],
[107.64653,-7.0002],
[107.64649,-7.00002],
[107.64648,-6.99993],
[107.64648,-6.99975],
[107.64649,-6.99968],
[107.64648,-6.9996],
[107.64647,-6.99958],
[107.64644,-6.99956],
[107.64582,-6.99947],
[107.64515,-6.99944],
[107.64412,-6.99927],
[107.64388,-6.9992],
[107.64378,-6.99916],
[107.64373,-6.99913],
[107.64281,-6.9989],
[107.64272,-6.99936],
[107.64281,-6.9989],
[107.64373,-6.99913],
[107.64378,-6.99916],
[107.64388,-6.9992],
[107.64412,-6.99927],
[107.64515,-6.99944],
[107.64582,-6.99947],
[107.64644,-6.99956],
[107.64647,-6.99958],
[107.64648,-6.9996],
[107.64649,-6.99968],
[107.64648,-6.99975],
[107.64648,-6.99993],
[107.64649,-7.00002],
[107.64653,-7.0002],
[107.6469,-7.0003],
[107.65557,-7.00308],
[107.65689,-7.00346],
[107.6569,-7.00347],
[107.65746,-7.00362],
[107.65777,-7.00296],
[107.65807,-7.00213],
[107.65853,-7.00111],
[107.65859,-7.001],
[107.65868,-7.00089],
[107.65873,-7.00087],
[107.65879,-7.00086],
[107.65892,-7.00089],
[107.65899,-7.00089],
[107.65904,-7.0009],
[107.6591,-7.00089],
[107.65916,-7.00086],
[107.6593,-7.00074],
[107.65949,-7.00055],
[107.65954,-7.00047],
[107.66002,-6.99914],
[107.66007,-6.99903],
[107.66014,-6.99896],
[107.66018,-6.99895],
[107.6602,-6.99894],
[107.66022,-6.99892],
[107.66024,-6.99891],
[107.6603,-6.99882],
[107.66031,-6.99879],
[107.66036,-6.99875],
[107.66037,-6.99875],
[107.6604,-6.99874],
[107.66052,-6.99874],
[107.6639,-6.99931],
[107.66393,-6.99931],
[107.66398,-6.99928],
[107.66399,-6.99927],
[107.66399,-6.99926],
[107.66401,-6.9992],
[107.66405,-6.99874],
[107.66407,-6.99869],
[107.66409,-6.99867],
[107.66411,-6.99866],
[107.6642,-6.99864],
[107.66508,-6.99867],
[107.66713,-6.99882],
[107.66771,-6.99884],
[107.66821,-6.9989],
[107.66868,-6.9979],
[107.66879,-6.9976],
[107.66884,-6.99741],
[107.66888,-6.99713],
[107.66868,-6.99445],
[107.6687,-6.99414],
[107.6687,-6.99309],
[107.66864,-6.9925],
[107.6685,-6.99164],
[107.6684,-6.99157],
[107.66832,-6.99154],
[107.66665,-6.99117],
[107.66567,-6.99099],
[107.66492,-6.99081],
[107.66444,-6.99073],
[107.66443,-6.99072],
[107.66362,-6.99058],
[107.66259,-6.99049],
[107.66136,-6.99029],
[107.65916,-6.99],
[107.65745,-6.98966],
[107.65533,-6.98932],
[107.65476,-6.98926],
[107.6528,-6.98893],
[107.65206,-6.99165],
[107.65193,-6.99242],
[107.6519,-6.99279],
[107.65191,-6.99283],
[107.65194,-6.99285],
[107.65219,-6.99294],
[107.65224,-6.99297],
[107.65246,-6.99319],
[107.65276,-6.99301],
[107.65281,-6.993],
[107.65475,-6.99347],
[107.65515,-6.99376],
[107.65621,-6.99443],
[107.65755,-6.9959],
[107.6579,-6.99614],
[107.65842,-6.99663],
[107.6579,-6.99614],
[107.65755,-6.9959],
[107.65621,-6.99443],
[107.65515,-6.99376],
[107.65475,-6.99347],
[107.65281,-6.993],
[107.65276,-6.99301],
[107.65246,-6.99319],
[107.65224,-6.99297],
[107.65219,-6.99294],
[107.65223,-6.99311],
[107.65232,-6.99339],
[107.65248,-6.99369],
[107.65275,-6.9941],
[107.65277,-6.99416],
[107.65292,-6.99438],
[107.65299,-6.99468],
[107.653,-6.99497],
[107.65299,-6.99504],
[107.65293,-6.99516],
[107.6528,-6.99532],
[107.65274,-6.99537],
[107.65164,-6.99602],
[107.65103,-6.99607],
[107.65085,-6.99612],
[107.65076,-6.99619],
[107.65065,-6.99633],
[107.65044,-6.99654],
[107.65041,-6.99656],
[107.6504,-6.99658],
[107.65017,-6.99681],
[107.64999,-6.99693],
[107.64987,-6.99704],
[107.6498,-6.99713],
[107.6498,-6.99714],
[107.64975,-6.99724],
[107.64965,-6.99751],
[107.64963,-6.99754],
[107.64961,-6.99759],
[107.64948,-6.99779],
[107.6493,-6.99799],
[107.64864,-6.99861],
[107.64855,-6.99873],
[107.64818,-6.99934],
[107.64753,-7.00051],
[107.64751,-7.00066],
[107.64749,-7.0007],
[107.64736,-7.00085],
[107.64704,-7.00136],
[107.64687,-7.00157],
[107.64674,-7.00169],
[107.64369,-7.00545],
[107.64309,-7.00639],
[107.64277,-7.00629],
[107.64278,-7.00628],
[107.64282,-7.00621],
[107.64294,-7.00606],
[107.64306,-7.00587],
[107.64337,-7.00547],
[107.64342,-7.00539],
[107.64349,-7.00524],
[107.64352,-7.00521],
[107.64354,-7.00521],
[107.64358,-7.00522],
[107.64361,-7.00522],
[107.6446,-7.00402],
[107.64465,-7.00393],
[107.64676,-7.00128],
[107.64712,-7.00075],
[107.64729,-7.00044],
[107.6469,-7.0003],
[107.64653,-7.0002],
[107.64649,-7.00002],
[107.64648,-6.99993],
[107.64648,-6.99975],
[107.64649,-6.99968],
[107.64648,-6.9996],
[107.64647,-6.99958],
[107.64644,-6.99956],
[107.64582,-6.99947],
[107.64543,-6.99945],
[107.64538,-6.9997],
[107.64538,-6.99974],
[107.64533,-6.99998],
[107.64556,-7.00001],
[107.64556,-7.00002],
[107.64557,-7.00002],
[107.64557,-7.00006],
[107.64543,-7.00046],
[107.64557,-7.00006],
[107.64557,-7.00002],
[107.64556,-7.00002],
[107.64556,-7.00001],
[107.64533,-6.99998],
[107.64538,-6.99974],
[107.64538,-6.9997],
[107.64543,-6.99945],
[107.64515,-6.99944],
[107.64412,-6.99927],
[107.64388,-6.9992],
[107.64378,-6.99916],
[107.64373,-6.99913],
[107.64269,-6.99887],
[107.64255,-6.99886],
[107.64251,-6.99885],
[107.64228,-6.99884],
[107.64182,-6.99885],
[107.64177,-6.99869],
[107.64175,-6.99853],
[107.64176,-6.99825],
[107.64236,-6.99556],
[107.64243,-6.99511],
[107.6425,-6.99485],
[107.6425,-6.99466],
[107.64245,-6.99441],
[107.64236,-6.99415],
[107.642,-6.99402],
[107.6408,-6.99351],
[107.64063,-6.99346],
[107.64057,-6.99345],
[107.6405,-6.99345],
[107.64043,-6.99346],
[107.64003,-6.99356],
[107.63976,-6.99365],
[107.63975,-6.99365],
[107.63968,-6.99368],
[107.63963,-6.99369],
[107.63943,-6.99378],
[107.63942,-6.99378],
[107.63941,-6.99379],
[107.63939,-6.99379],
[107.63931,-6.99375],
[107.63915,-6.99364],
[107.63913,-6.99362],
[107.63911,-6.99362],
[107.63909,-6.99361],
[107.63893,-6.99357],
[107.63889,-6.99357],
[107.63883,-6.99355],
[107.63881,-6.99355],
[107.63845,-6.99348],
[107.63819,-6.9935],
[107.63789,-6.99349],
[107.63634,-6.99305],
[107.63616,-6.99297],
[107.63604,-6.99293],
[107.6359,-6.99283],
[107.63571,-6.99274],
[107.6357,-6.99274],
[107.63558,-6.99269],
[107.63546,-6.99266],
[107.63542,-6.99266],
[107.63538,-6.99265],
[107.63479,-6.99262],
[107.63461,-6.99258],
[107.63456,-6.99254],
[107.63453,-6.99245],
[107.63453,-6.99239],
[107.63455,-6.99224],
[107.63455,-6.99217],
[107.63454,-6.99211],
[107.63452,-6.99207],
[107.63449,-6.99203],
[107.63448,-6.99199],
[107.63439,-6.9918],
[107.63427,-6.99148],
[107.63572,-6.99182],
[107.63572,-6.99189],
[107.6358,-6.99193],
[107.63572,-6.99189],
[107.63572,-6.99182],
[107.63427,-6.99148],
[107.63439,-6.9918],
[107.63448,-6.99199],
[107.63449,-6.99203],
[107.63452,-6.99207],
[107.63454,-6.99211],
[107.63455,-6.99217],
[107.63455,-6.99224],
[107.63453,-6.99239],
[107.63453,-6.99245],
[107.63456,-6.99254],
[107.63461,-6.99258],
[107.63464,-6.99259],
[107.63465,-6.99259],
[107.63466,-6.99264],
[107.63469,-6.99271],
[107.63472,-6.99274],
[107.63476,-6.99275],
[107.63477,-6.99276],
[107.63477,-6.99279],
[107.63476,-6.9928],
[107.63476,-6.99286],
[107.6348,-6.99286],
[107.63489,-6.99289],
[107.63496,-6.99289],
[107.63498,-6.99288],
[107.63501,-6.99288],
[107.63506,-6.99289],
[107.63507,-6.9929],
[107.63507,-6.99293],
[107.63506,-6.99294],
[107.63506,-6.99295],
[107.63505,-6.99296],
[107.63504,-6.993],
[107.63488,-6.99322],
[107.63481,-6.99335],
[107.63479,-6.99337],
[107.63478,-6.99339],
[107.6347,-6.99347],
[107.63441,-6.99369],
[107.6341,-6.99388],
[107.63441,-6.99369],
[107.6347,-6.99347],
[107.63478,-6.99339],
[107.63479,-6.99337],
[107.63481,-6.99335],
[107.63488,-6.99322],
[107.63504,-6.993],
[107.63505,-6.99296],
[107.63506,-6.99295],
[107.63506,-6.99294],
[107.63507,-6.99293],
[107.63507,-6.9929],
[107.63506,-6.99289],
[107.63501,-6.99288],
[107.63498,-6.99288],
[107.63496,-6.99289],
[107.63489,-6.99289],
[107.6348,-6.99286],
[107.63476,-6.99286],
[107.63476,-6.9928],
[107.63477,-6.99279],
[107.63477,-6.99276],
[107.63478,-6.99274],
[107.63479,-6.99262],
[107.63538,-6.99265],
[107.63542,-6.99266],
[107.63546,-6.99266],
[107.63558,-6.99269],
[107.6357,-6.99274],
[107.63571,-6.99274],
[107.6359,-6.99283],
[107.63604,-6.99293],
[107.63595,-6.99311],
[107.63593,-6.9932],
[107.63593,-6.99323],
[107.63592,-6.99327],
[107.63592,-6.99336],
[107.63591,-6.99339],
[107.6359,-6.99346],
[107.63583,-6.99365],
[107.63583,-6.99368],
[107.6358,-6.99377],
[107.63579,-6.99384],
[107.63576,-6.99393],
[107.63575,-6.994],
[107.6357,-6.99411],
[107.63536,-6.99408],
[107.63525,-6.9941],
[107.6352,-6.9941],
[107.63517,-6.99411],
[107.63514,-6.99416],
[107.63513,-6.99417],
[107.63512,-6.99417],
[107.63512,-6.99418],
[107.63511,-6.99418],
[107.63505,-6.99416],
[107.63501,-6.99416],
[107.63494,-6.99413],
[107.63493,-6.99413],
[107.63492,-6.99416],
[107.63492,-6.99417],
[107.63479,-6.99415],
[107.63476,-6.99416],
[107.63475,-6.99416],
[107.63473,-6.99418],
[107.63472,-6.9942],
[107.63472,-6.99421],
[107.6346,-6.99428],
[107.63456,-6.99429],
[107.63444,-6.99429],
[107.63439,-6.9943],
[107.63424,-6.9943],
[107.63412,-6.99428],
[107.63424,-6.9943],
[107.63439,-6.9943],
[107.63444,-6.99429],
[107.63456,-6.99429],
[107.6346,-6.99428],
[107.63472,-6.99421],
[107.63472,-6.9942],
[107.63473,-6.99418],
[107.63475,-6.99416],
[107.63476,-6.99416],
[107.63479,-6.99415],
[107.63492,-6.99417],
[107.63492,-6.99416],
[107.63493,-6.99413],
[107.63494,-6.99413],
[107.63501,-6.99416],
[107.63505,-6.99416],
[107.63511,-6.99418],
[107.63512,-6.99418],
[107.63512,-6.99417],
[107.63513,-6.99417],
[107.63514,-6.99416],
[107.63517,-6.99411],
[107.6352,-6.9941],
[107.63525,-6.9941],
[107.63536,-6.99408],
[107.6357,-6.99411],
[107.63614,-6.9943],
[107.63619,-6.99435],
[107.63621,-6.9944],
[107.63617,-6.99494],
[107.63604,-6.99557],
[107.63604,-6.99561],
[107.63605,-6.99561],
[107.63616,-6.99564],
[107.63618,-6.99565],
[107.63619,-6.99566],
[107.63619,-6.99568],
[107.6362,-6.9957],
[107.63619,-6.99583],
[107.63618,-6.99585],
[107.63618,-6.99588],
[107.63619,-6.99591],
[107.63621,-6.99627],
[107.63622,-6.99627],
[107.63622,-6.99628],
[107.6362,-6.99628],
[107.63615,-6.99626],
[107.63612,-6.99627],
[107.63602,-6.99627],
[107.63599,-6.99626],
[107.63594,-6.99627],
[107.63592,-6.99628],
[107.63587,-6.99632],
[107.63583,-6.99633],
[107.6358,-6.99633],
[107.63575,-6.99632],
[107.63572,-6.99632],
[107.63569,-6.99631],
[107.63566,-6.99631],
[107.63565,-6.99633],
[107.63564,-6.99633],
[107.63562,-6.99634],
[107.63557,-6.99635],
[107.6353,-6.99633],
[107.63525,-6.99632],
[107.63519,-6.99632],
[107.63512,-6.99631],
[107.63506,-6.99631],
[107.63503,-6.99632],
[107.63502,-6.99633],
[107.63502,-6.99634],
[107.63492,-6.99654],
[107.63481,-6.99671],
[107.63475,-6.99683],
[107.63473,-6.99691],
[107.63471,-6.99693],
[107.63469,-6.99709],
[107.63467,-6.99711],
[107.63467,-6.99712],
[107.63457,-6.99712],
[107.63452,-6.99711],
[107.63446,-6.99711],
[107.63434,-6.99704],
[107.63428,-6.99703],
[107.63424,-6.99707],
[107.63417,-6.9971],
[107.63393,-6.99713],
[107.63386,-6.99717],
[107.63386,-6.99718],
[107.63384,-6.99723],
[107.63383,-6.99724],
[107.63377,-6.99724],
[107.6337,-6.99725],
[107.63362,-6.99728],
[107.63355,-6.9972],
[107.63344,-6.99712],
[107.63234,-6.99647],
[107.63225,-6.99643],
[107.63221,-6.99639],
[107.63216,-6.99636],
[107.6321,-6.99628],
[107.63206,-6.99621],
[107.632,-6.99596],
[107.63184,-6.99434],
[107.63175,-6.99382],
[107.63172,-6.99373],
[107.63177,-6.9937],
[107.63186,-6.99367],
[107.63212,-6.99355],
[107.63223,-6.99352],
[107.63252,-6.99352],
[107.63379,-6.99366],
[107.63301,-6.99357],
[107.63304,-6.99408],
[107.63313,-6.99424],
[107.63339,-6.99429],
[107.63364,-6.9944],
[107.63339,-6.99429],
[107.63313,-6.99424],
[107.63304,-6.99408],
[107.63301,-6.99357],
[107.63252,-6.99352],
[107.63223,-6.99352],
[107.63212,-6.99355],
[107.63186,-6.99367],
[107.63177,-6.9937],
[107.63172,-6.99373],
[107.63175,-6.99382],
[107.63184,-6.99434],
[107.632,-6.99596],
[107.63206,-6.99621],
[107.6321,-6.99628],
[107.63216,-6.99636],
[107.63221,-6.99639],
[107.63225,-6.99643],
[107.63234,-6.99647],
[107.63344,-6.99712],
[107.63355,-6.9972],
[107.63374,-6.99741],
[107.63389,-6.99768],
[107.63391,-6.99775],
[107.63394,-6.998],
[107.63392,-6.99829],
[107.63389,-6.99848],
[107.63371,-6.99914],
[107.63361,-6.9994],
[107.6336,-6.99947],
[107.63358,-6.99953],
[107.63358,-6.99961],
[107.63357,-6.99965],
[107.63359,-6.99975],
[107.63361,-6.99979],
[107.63366,-6.99986],
[107.63369,-6.99989],
[107.63373,-6.99991],
[107.63403,-6.99996],
[107.63464,-6.99999],
[107.63482,-7.00002],
[107.63503,-7.00008],
[107.63524,-7.00017],
[107.63546,-7.00031],
[107.63687,-7.0016],
[107.63708,-7.00182],
[107.63713,-7.00186],
[107.63743,-7.00201],
[107.63777,-7.00222],
[107.63861,-7.00284],
[107.63974,-7.00391],
[107.64059,-7.00462],
[107.64074,-7.00472],
[107.64114,-7.00511],
[107.64163,-7.00567],
[107.64193,-7.00605],
[107.64204,-7.00615],
[107.6421,-7.00619],
[107.64251,-7.00636],
[107.64257,-7.00637],
[107.64261,-7.00636],
[107.64266,-7.00636],
[107.64268,-7.00635],
[107.64272,-7.00634],
[107.64277,-7.00629],
[107.64319,-7.00642],
[107.64337,-7.00644],
[107.64347,-7.00644],
[107.64387,-7.00651],
[107.64435,-7.00666],
[107.64453,-7.00674],
[107.64505,-7.00705],
[107.64514,-7.00709],
[107.64586,-7.00751],
[107.64645,-7.00782],
[107.64669,-7.0079],
[107.6476,-7.00808],
[107.64786,-7.0081],
[107.64828,-7.0081],
[107.64883,-7.00802],
[107.65006,-7.00772],
[107.65057,-7.00763],
[107.65113,-7.00757],
[107.65142,-7.00752],
[107.65259,-7.00717],
[107.65282,-7.00713],
[107.65327,-7.0071],
[107.65345,-7.00711],
[107.65383,-7.00718],
[107.65411,-7.00727],
[107.65412,-7.00727],
[107.65471,-7.00751],
[107.65472,-7.00754],
[107.65481,-7.0076],
[107.65482,-7.0076],
[107.65461,-7.00813],
[107.65475,-7.00823],
[107.6547,-7.00833],
[107.6546,-7.00869],
[107.65437,-7.00931],
[107.65437,-7.00934],
[107.65434,-7.0094],
[107.65425,-7.00977],
[107.65426,-7.0098],
[107.6543,-7.00984],
[107.65432,-7.00987],
[107.65437,-7.00998],
[107.65441,-7.01002],
[107.65444,-7.01004],
[107.65455,-7.01008],
[107.6547,-7.01011],
[107.65474,-7.01013],
[107.655,-7.01021],
[107.65517,-7.01028],
[107.65533,-7.01038],
[107.65554,-7.01045],
[107.65557,-7.01047],
[107.65642,-7.01075],
[107.65656,-7.01046],
[107.6567,-7.01011],
[107.65668,-7.00918],
[107.65667,-7.00912],
[107.65664,-7.00907],
[107.65656,-7.00903],
[107.65624,-7.00894],
[107.65601,-7.00893],
[107.65594,-7.00922],
[107.65594,-7.00924],
[107.65593,-7.00926],
[107.65591,-7.00928],
[107.65589,-7.00928],
[107.65588,-7.00927],
[107.65585,-7.00927],
[107.65583,-7.00935],
[107.65582,-7.00937],
[107.65582,-7.0094],
[107.65583,-7.00941],
[107.65583,-7.00942],
[107.65584,-7.00944],
[107.65586,-7.00946],
[107.65589,-7.00953],
[107.65589,-7.00957],
[107.65588,-7.00963],
[107.65577,-7.00992],
[107.65598,-7.01],
[107.65577,-7.00992],
[107.65588,-7.00963],
[107.65589,-7.00957],
[107.65589,-7.00953],
[107.65586,-7.00946],
[107.65584,-7.00944],
[107.65583,-7.00942],
[107.65583,-7.00941],
[107.65582,-7.0094],
[107.65582,-7.00937],
[107.65583,-7.00935],
[107.65585,-7.00927],
[107.65588,-7.00927],
[107.65589,-7.00928],
[107.65591,-7.00928],
[107.65593,-7.00926],
[107.65594,-7.00924],
[107.65594,-7.00922],
[107.65601,-7.00893],
[107.65624,-7.00894],
[107.65656,-7.00903],
[107.65664,-7.00907],
[107.65667,-7.00912],
[107.65668,-7.00918],
[107.65669,-7.00947],
[107.65646,-7.00945],
[107.65607,-7.00936],
[107.65646,-7.00945],
[107.65669,-7.00947],
[107.65668,-7.00918],
[107.65667,-7.00912],
[107.65664,-7.00907],
[107.65656,-7.00903],
[107.65624,-7.00894],
[107.65601,-7.00893],
[107.6562,-7.00826],
[107.6562,-7.00822],
[107.65619,-7.0082],
[107.65618,-7.00819],
[107.65618,-7.00817],
[107.65615,-7.00815],
[107.65585,-7.00801],
[107.65608,-7.00784],
[107.65659,-7.00729],
[107.65704,-7.00748],
[107.65692,-7.00777],
[107.65692,-7.00779],
[107.65687,-7.00789],
[107.65685,-7.00788],
[107.65683,-7.00788],
[107.65659,-7.00778],
[107.65625,-7.00767],
[107.65608,-7.00784],
[107.65568,-7.00813],
[107.65554,-7.00818],
[107.65551,-7.0082],
[107.65539,-7.00825],
[107.65536,-7.00825],
[107.65521,-7.00828],
[107.65505,-7.00828],
[107.65483,-7.00825],
[107.65475,-7.00823],
[107.65461,-7.00813],
[107.65482,-7.0076],
[107.65494,-7.00764],
[107.65498,-7.00764],
[107.65499,-7.00763],
[107.65501,-7.00764],
[107.65516,-7.00766],
[107.65528,-7.00764],
[107.65543,-7.00758],
[107.65564,-7.00741],
[107.65609,-7.00692],
[107.65612,-7.0069],
[107.65629,-7.00673],
[107.65668,-7.0064],
[107.6569,-7.00629],
[107.65702,-7.00625],
[107.65703,-7.00624],
[107.65832,-7.00586],
[107.6584,-7.00582],
[107.65858,-7.00563],
[107.65864,-7.00552],
[107.65877,-7.0052],
[107.65887,-7.00372],
[107.65889,-7.00279],
[107.65893,-7.00242],
[107.65899,-7.00231],
[107.65897,-7.00216],
[107.65902,-7.0019],
[107.65901,-7.00189],
[107.65901,-7.00188],
[107.659,-7.00187],
[107.65886,-7.0018],
[107.65883,-7.00177],
[107.65883,-7.00171],
[107.65889,-7.00151],
[107.65883,-7.00171],
[107.65883,-7.00177],
[107.65886,-7.0018],
[107.659,-7.00187],
[107.65901,-7.00188],
[107.65901,-7.00189],
[107.65902,-7.0019],
[107.65897,-7.00216],
[107.65899,-7.00231],
[107.65893,-7.00242],
[107.65889,-7.00279],
[107.65875,-7.00277],
[107.65855,-7.00271],
[107.65796,-7.00245],
[107.65777,-7.00296],
[107.65856,-7.00331],
[107.65857,-7.00332],
[107.65859,-7.00333],
[107.6586,-7.00335],
[107.65866,-7.00336],
[107.65871,-7.00336],
[107.65888,-7.00339],
[107.65889,-7.00279],
[107.65893,-7.00242],
[107.65899,-7.00231],
[107.65904,-7.00227],
[107.65935,-7.0021],
[107.6596,-7.00201],
[107.66067,-7.00182],
[107.66081,-7.00178],
[107.66094,-7.00169],
[107.661,-7.00166],
[107.66115,-7.00156],
[107.66147,-7.00131],
[107.66163,-7.00124],
[107.66192,-7.00116],
[107.66202,-7.00115],
[107.66217,-7.00117],
[107.66229,-7.0012],
[107.6624,-7.00124],
[107.66254,-7.00131],
[107.66451,-7.00255],
[107.66471,-7.00265],
[107.66487,-7.00271],
[107.66488,-7.00272],
[107.6651,-7.00279],
[107.66524,-7.00281],
[107.66539,-7.0028],
[107.66547,-7.00278],
[107.66553,-7.00275],
[107.66557,-7.00274],
[107.66571,-7.00259],
[107.66666,-7.00137],
[107.66681,-7.00111],
[107.66712,-7.00076],
[107.66757,-6.99996],
[107.66812,-6.99889],
[107.66808,-6.99889],
[107.66771,-6.99884],
[107.66713,-6.99882],
[107.66508,-6.99867],
[107.6642,-6.99864],
[107.66411,-6.99866],
[107.66409,-6.99867],
[107.66407,-6.99869],
[107.66405,-6.99874],
[107.66401,-6.9992],
[107.66399,-6.99926],
[107.66399,-6.99927],
[107.66398,-6.99928],
[107.66393,-6.99931],
[107.6639,-6.99931],
[107.66052,-6.99874],
[107.6604,-6.99874],
[107.66037,-6.99875],
[107.66036,-6.99875],
[107.66031,-6.99879],
[107.6603,-6.99882],
[107.66024,-6.99891],
[107.66022,-6.99892],
[107.6602,-6.99894],
[107.66018,-6.99895],
[107.66014,-6.99896],
[107.66007,-6.99903],
[107.66002,-6.99914],
[107.65954,-7.00047],
[107.65949,-7.00055],
[107.6593,-7.00074],
[107.65916,-7.00086],
[107.6591,-7.00089],
[107.65904,-7.0009],
[107.65899,-7.00089],
[107.65892,-7.00089],
[107.65879,-7.00086],
[107.65873,-7.00087],
[107.65868,-7.00089],
[107.65859,-7.001],
[107.65853,-7.00111],
[107.65807,-7.00213],
[107.65777,-7.00296],
[107.65746,-7.00362],
[107.65622,-7.00662],
]])
def jalanDesaBojongsari1(self, nama):
self.jalan.record(nama)
self.jalan.line(
[[
[107.65621,-7.00664],
[107.65621,-7.00665],
[107.6562,-7.00667],
[107.6562,-7.00668],
[107.6562,-7.00669],
[107.65621,-7.0067],
[107.65629,-7.00673],
[107.65612,-7.0069],
[107.65609,-7.00692],
[107.65596,-7.00707],
[107.65581,-7.00722],
[107.65564,-7.00741],
[107.65555,-7.00749],
[107.65543,-7.00758],
[107.65528,-7.00764],
[107.65516,-7.00766],
[107.65501,-7.00764],
[107.65499,-7.00763],
[107.65482,-7.00757],
[107.65471,-7.00751],
[107.65462,-7.00747],
[107.65442,-7.0074],
[107.65412,-7.00727],
[107.65411,-7.00727],
[107.65383,-7.00718],
[107.65377,-7.00717],
[107.65366,-7.00715],
[107.65356,-7.00713],
[107.65345,-7.00711],
[107.65327,-7.0071],
[107.65302,-7.00711],
[107.65282,-7.00713],
[107.65259,-7.00717],
[107.6522,-7.00728],
[107.65142,-7.00752],
[107.65141,-7.00752],
[107.65113,-7.00757],
[107.65057,-7.00763],
[107.65006,-7.00772],
[107.64983,-7.00777],
[107.64964,-7.00782],
[107.64896,-7.00799],
[107.64883,-7.00802],
[107.64854,-7.00807],
[107.64828,-7.0081],
[107.64786,-7.0081],
[107.64769,-7.00809],
[107.6476,-7.00808],
[107.64727,-7.00802],
[107.64708,-7.00798],
[107.64699,-7.00796],
[107.64669,-7.0079],
[107.64645,-7.00782],
[107.64629,-7.00774],
[107.64586,-7.00751],
[107.64514,-7.00709],
[107.64505,-7.00705],
[107.64494,-7.00698],
[107.64472,-7.00685],
[107.64453,-7.00674],
[107.64442,-7.00669],
[107.64435,-7.00666],
[107.64423,-7.00662],
[107.64412,-7.00658],
[107.64399,-7.00655],
[107.64387,-7.00651],
[107.64372,-7.00648],
[107.64361,-7.00646],
[107.64347,-7.00644],
[107.64337,-7.00644],
[107.64328,-7.00643],
[107.64319,-7.00642],
[107.64309,-7.00639],
[107.64348,-7.00578],
[107.64369,-7.00545],
[107.64454,-7.00444],
[107.64489,-7.00399],
[107.64492,-7.00395],
[107.64497,-7.00395],
[107.64503,-7.00396],
[107.64513,-7.00397],
[107.64524,-7.00399],
[107.64542,-7.00401],
[107.64596,-7.00402],
[107.64605,-7.00402],
[107.64612,-7.00402],
[107.64616,-7.00403],
[107.64619,-7.00402],
[107.64621,-7.00401],
[107.64622,-7.004],
[107.64622,-7.00399],
[107.64622,-7.00398],
[107.64622,-7.00396],
[107.64623,-7.00389],
[107.64623,-7.00378],
[107.64623,-7.00368],
[107.64624,-7.0036],
[107.64624,-7.00353],
[107.64624,-7.00351],
[107.64626,-7.00349],
[107.64627,-7.00347],
[107.64628,-7.00346],
[107.64629,-7.00346],
[107.64631,-7.00346],
[107.64637,-7.00348],
[107.64656,-7.00358],
[107.64678,-7.00369],
[107.64682,-7.00371],
[107.64693,-7.00376],
[107.64712,-7.00391],
[107.64714,-7.00392],
[107.6473,-7.00397],
[107.64742,-7.00402],
[107.64743,-7.00402],
[107.64756,-7.00406],
[107.64785,-7.00412],
[107.648,-7.00417],
[107.64805,-7.00419],
[107.648,-7.00417],
[107.64806,-7.00401],
[107.64822,-7.00359],
[107.64837,-7.00324],
[107.64842,-7.00316],
[107.64843,-7.00314],
[107.64845,-7.00312],
[107.64845,-7.00311],
[107.64846,-7.0031],
[107.64846,-7.00309],
[107.64846,-7.00308],
[107.64845,-7.00307],
[107.64845,-7.00306],
[107.6482,-7.00292],
[107.64819,-7.0029],
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for lit_nlp.components.gradient_maps."""
import random
from absl.testing import absltest
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import types as lit_types
from lit_nlp.components import tcav
# TODO(lit-dev): Move glue_models out of lit_nlp/examples
from lit_nlp.examples.models import glue_models
from lit_nlp.lib import caching # for hash id fn
from lit_nlp.lib import testing_utils
import numpy as np
JsonDict = lit_types.JsonDict
Spec = lit_types.Spec
BERT_TINY_PATH = 'https://storage.googleapis.com/what-if-tool-resources/lit-models/sst2_tiny.tar.gz' # pylint: disable=line-too-long
import transformers
BERT_TINY_PATH = transformers.file_utils.cached_path(BERT_TINY_PATH,
extract_compressed_file=True)
class ModelBasedTCAVTest(absltest.TestCase):
def setUp(self):
super(ModelBasedTCAVTest, self).setUp()
self.tcav = tcav.TCAV()
self.model = caching.CachingModelWrapper(
glue_models.SST2Model(BERT_TINY_PATH), 'test')
def test_tcav(self):
random.seed(0) # Sets seed since create_comparison_splits() uses random.
# Basic test with dummy outputs from the model.
examples = [
{'sentence': 'a'},
{'sentence': 'b'},
{'sentence': 'c'},
{'sentence': 'd'},
{'sentence': 'e'},
{'sentence': 'f'},
{'sentence': 'g'},
{'sentence': 'h'},
{'sentence': 'i'}]
indexed_inputs = [{'id': caching.input_hash(ex), 'data': ex}
for ex in examples]
dataset = lit_dataset.IndexedDataset(id_fn=caching.input_hash,
indexed_examples=indexed_inputs)
config = {
'concept_set_ids': [indexed_inputs[0]['id'],
indexed_inputs[2]['id'],
indexed_inputs[3]['id'],
indexed_inputs[7]['id']],
'class_to_explain': '1',
'grad_layer': 'cls_grad',
'random_state': 0,
'dataset_name': 'test'
}
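# Rough meaning of the config keys (based on how TCAV is used here): the
# concept_set_ids pick the concept examples out of the dataset, class_to_explain
# is the label whose gradients are probed, grad_layer names the model's gradient
# output field ('cls_grad' for this SST-2 model), and random_state seeds the
# random splits used for the statistical test.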
result = self.tcav.run_with_metadata(indexed_inputs, self.model, dataset,
config=config)
self.assertLen(result, 1)
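# Note: 'score' is the fraction of examples whose class gradient has a positive
# directional derivative along the learned concept activation vector (CAV), and
# 'accuracy' is the held-out accuracy of the linear CAV classifier.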
expected = {
'p_val': 0.13311,
'random_mean': 0.56667,
'result': {
'score': 0.33333,
'cos_sim': [
0.088691, -0.12179, 0.16013,
0.24840, -0.09793, 0.05166,
-0.21578, -0.06560, -0.14759
],
'dot_prods': [
189.085096, -266.36317, 344.350498,
547.144949, -211.663965, 112.502439,
-472.72066, -144.529598, -323.31888
],
'accuracy': 0.66667
}
}
testing_utils.assert_deep_almost_equal(self, expected, result[0])
def test_tcav_sample_from_positive(self):
# Tests the case where more concept examples are passed than non-concept
# examples, so the concept set is sampled from the concept examples.
random.seed(0) # Sets seed since create_comparison_splits() uses random.
# Basic test with dummy outputs from the model.
examples = [
{'sentence': 'a'},
{'sentence': 'b'},
{'sentence': 'c'},
{'sentence': 'd'},
{'sentence': 'e'},
{'sentence': 'f'},
{'sentence': 'g'},
{'sentence': 'h'}]
indexed_inputs = [{'id': caching.input_hash(ex), 'data': ex}
for ex in examples]
dataset = lit_dataset.IndexedDataset(id_fn=caching.input_hash,
indexed_examples=indexed_inputs)
config = {
'concept_set_ids': [indexed_inputs[0]['id'],
indexed_inputs[2]['id'],
indexed_inputs[3]['id'],
indexed_inputs[4]['id'],
indexed_inputs[7]['id']],
'class_to_explain': '1',
'grad_layer': 'cls_grad',
'random_state': 0
}
result = self.tcav.run_with_metadata(indexed_inputs, self.model, dataset,
config=config)
self.assertLen(result, 1)
expected = {
'p_val': 0.80489,
'random_mean': 0.53333,
'result': {
'score': 0.8,
'cos_sim': [
0.09527, -0.20442, 0.05141,
0.14985, 0.06750, -0.28244,
-0.11022, -0.14479
],
'dot_prods': [
152.48776, -335.64998, 82.99588,
247.80113, 109.53684, -461.81805,
-181.29095, -239.47817
],
'accuracy': 1.0
}
}
testing_utils.assert_deep_almost_equal(self, expected, result[0])
def test_relative_tcav(self):
# Tests passing in a negative set.
random.seed(0) # Sets seed since create_comparison_splits() uses random.
# Basic test with dummy outputs from the model.
examples = [
{'sentence': 'happy'}, # 0
{'sentence': 'sad'}, # 1
{'sentence': 'good'}, # 2
{'sentence': 'bad'}, # 3
{'sentence': 'pretty'}, # 4
{'sentence': 'ugly'}, # 5
{'sentence': 'sweet'}, # 6
{'sentence': 'bitter'}, # 7
{'sentence': 'well'}, # 8
{'sentence': 'poor'}, # 9
{'sentence': 'compelling'}, # 10
{'sentence': 'boring'}, # 11
{'sentence': 'pleasing'}, # 12
{'sentence': 'gross'}, # 13
{'sentence': 'blue'}, # 14
{'sentence': 'red'}, # 15
{'sentence': 'flower'}, # 16
{'sentence': 'bee'}, # 17
{'sentence': 'snake'}, # 18
{'sentence': 'windshield'}, # 19
{'sentence': 'plant'}, # 20
{'sentence': 'scary'}, # 21
{'sentence': 'pencil'}, # 22
{'sentence': 'hello'} # 23
]
indexed_inputs = [{'id': caching.input_hash(ex), 'data': ex}
for ex in examples]
dataset = lit_dataset.IndexedDataset(id_fn=caching.input_hash,
indexed_examples=indexed_inputs)
# This first example doesn't have enough examples for statistical testing,
# so the returned p-value is None.
config = {
'concept_set_ids': [indexed_inputs[0]['id'],
indexed_inputs[2]['id'],
indexed_inputs[4]['id']],
'negative_set_ids': [indexed_inputs[1]['id'],
indexed_inputs[3]['id'],
indexed_inputs[5]['id']],
'class_to_explain': '1',
'grad_layer': 'cls_grad',
'random_state': 0
}
result = self.tcav.run_with_metadata(indexed_inputs, self.model, dataset,
config=config)
self.assertLen(result, 1)
expected = {
'result': {
'score': 1.0,
'cos_sim': [
0.9999999581246426, 0.049332143689572144, 0.8987945047547466,
-0.41858423757857954, 0.6908297036543664, -0.5167857909664919,
0.8423017503220364, -0.005793079244916016, 0.8334491603894322,
-0.4054645113448612, 0.7616102123736647, -0.4578596155267783,
0.8366905563807711, -0.27390786544756535, 0.7325538474066896,
0.5190287630768531, 0.8145227936096425, 0.02005592868363552,
-0.1143256029298114, -0.1221480700842533, 0.6852995739227957,
0.3984620730733816, 0.5211149530112407, 0.5909723902471223
],
'dot_prods': [
1385.1480610241554, 69.95638452724207, 1239.4947646060161,
-595.253135700978, 971.5880156862692, -725.0749813217176,
1182.8641913758102, -8.149647641120662, 1146.5803071544124,
-576.4043054391316, 1038.3510704649307, -648.097269442522,
1154.4720122394317, -378.32103870822493, 1024.066390571124,
738.6959135414066, 1139.7963358416857, 28.691395032352318,
-167.37808507284706, -176.4474746971391, 959.5159619261449,
562.8772536987927, 716.7270332848395, 840.7031847912738
],
'accuracy': 0.5
},
'p_val': None,
'random_mean': 0.9285714285714286,
'split_size': 3,
'num_runs': 1
}
testing_utils.assert_deep_almost_equal(self, expected, result[0])
# This example has enough inputs for two runs of size 3.
config = {
'concept_set_ids': [
indexed_inputs[1]['id'], indexed_inputs[2]['id'],
indexed_inputs[4]['id'], indexed_inputs[5]['id'],
indexed_inputs[10]['id'], indexed_inputs[9]['id']
],
'negative_set_ids': [
indexed_inputs[0]['id'], indexed_inputs[3]['id'],
indexed_inputs[12]['id'], indexed_inputs[6]['id'],
indexed_inputs[7]['id'], indexed_inputs[8]['id']
],
'class_to_explain': '0',
'grad_layer': 'cls_grad',
'random_state': 0
}
result = self.tcav.run_with_metadata(indexed_inputs, self.model, dataset,
config=config)
self.assertLen(result, 1)
expected = {
'result': {
'score': 0.0,
'cos_sim': [
0.2731987606830683, 0.427838045403812, 0.3166440584420665,
-0.1358964965831398, 0.5616614702946262, -0.16511808390168164,
-0.05103355252438478, -0.16945565920473257, 0.28148962348967155,
-0.18169036476392003, 0.33244873698665106, -0.13316476546155087,
0.15226772288202886, -0.05534469666649352, 0.2886150002073456,
0.33888135113008555, 0.12875301375254147, 0.046908665182593096,
-0.052445114502024985, 0.088858405172313, 0.219517174438115,
0.35833013079793435, 0.2291162415605806, 0.3635686086637199
],
'dot_prods': [
452.17220644153525, 724.9460578876271, 521.776546745851,
-230.9170522777958, 943.8754747127095, -276.8190148523963,
-85.63511897570154, -284.8487792023684, 462.71830216201926,
-308.62790255581496, 541.5830529968077, -225.2299308998058,
251.04716264718752, -91.33998249705493, 482.0991668852444,
576.3029773313335, 215.28329927312336, 80.18458502795752,
-91.74640483442752, 153.37559992294862, 367.2562273288043,
604.8378479001944, 376.53473821563625, 618.003311205616
],
'accuracy': 0.5
},
'p_val': 0.42264973081037427,
'random_mean': 0.0,
'split_size': 3,
'num_runs': 2
}
testing_utils.assert_deep_almost_equal(self, expected, result[0])
# This example has enough examples for three runs of size 3 and two runs of
# size 5, and returns results with p-value < 0.05.
config = {
'concept_set_ids': [indexed_inputs[0]['id'],
indexed_inputs[1]['id'],
indexed_inputs[2]['id'],
indexed_inputs[3]['id'],
indexed_inputs[4]['id'],
indexed_inputs[5]['id'],
indexed_inputs[6]['id'],
indexed_inputs[7]['id'],
indexed_inputs[8]['id'],
indexed_inputs[9]['id']],
'negative_set_ids': [indexed_inputs[10]['id'],
indexed_inputs[11]['id'],
indexed_inputs[12]['id'],
indexed_inputs[13]['id'],
indexed_inputs[14]['id'],
indexed_inputs[15]['id'],
indexed_inputs[16]['id'],
indexed_inputs[17]['id'],
indexed_inputs[18]['id'],
indexed_inputs[19]['id']],
'class_to_explain': '1',
'grad_layer': 'cls_grad',
'random_state': 0
}
result = self.tcav.run_with_metadata(indexed_inputs, self.model, dataset,
config=config)
self.assertLen(result, 1)
expected = [{
'result': {
'score': 0.42857142857142855,
'cos_sim': [
-0.1107393877916321, -0.0993967046974328, -0.2214985917242054,
0.08132588965575606, -0.3590211572508748, 0.18708109817461333,
0.000724498781128839, 0.09700473783330398, -0.25015742815240055,
0.16108236033785076, -0.10283274286140846, 0.0972663321478731,
-0.05924679176256152, -0.048499696342091746,
-0.4357117016074766, -0.593245752003111, -0.3645147796989344,
-0.5507605083253673, -0.27914997949782694, -0.30908550968594417,
-0.5584676299422896, -0.16983339994284577, -0.42587740852240746,
-0.37482298817032594
],
'dot_prods': [
-261.4389298435066, -240.23776409902007, -520.6275907607769,
197.11495117497446, -860.6035066083074, 447.3775519523981,
1.7341104803878409, 232.59170976304426, -586.5576327736542,
390.2961568516803, -238.95427152619726, 234.6617547723058,
-139.3334215524385, -114.17392512371171, -1038.149036709951,
-1439.0663895591745, -869.3828698612926, -1342.899780229334,
-696.569760699206, -760.9907977738051, -1332.7284530349625,
-408.90435403478875, -998.3360993150825, -908.8111404537224
],
'accuracy': 0.75
},
'p_val': 0.04400624968940752,
'random_mean': 0.9642857142857143,
'split_size': 5,
'num_runs': 2
}]
testing_utils.assert_deep_almost_equal(self, expected, result)
class TCAVTest(absltest.TestCase):
def setUp(self):
super(TCAVTest, self).setUp()
self.tcav = tcav.TCAV()
self.model = glue_models.SST2Model(BERT_TINY_PATH)
def test_hyp_test(self):
# t-test where p-value != 1.
scores = [0, 0, 0.5, 0.5, 1, 1]
random_scores = [3, 5, -8, -100, 0, -90]
result = self.tcav.hyp_test(scores, random_scores)
self.assertAlmostEqual(0.1415165926492605, result)
# t-test where p-value = 1.
scores = [0.1, 0.13, 0.19, 0.09, 0.12, 0.1]
random_scores = [0.1, 0.13, 0.19, 0.09, 0.12, 0.1]
result = self.tcav.hyp_test(scores, random_scores)
self.assertEqual(1.0, result)
def test_compute_tcav_score(self):
dir_deriv_positive_class = [1]
result = self.tcav.compute_tcav_score(dir_deriv_positive_class)
self.assertAlmostEqual(1, result)
dir_deriv_positive_class = [0]
result = self.tcav.compute_tcav_score(dir_deriv_positive_class)
self.assertAlmostEqual(0, result)
dir_deriv_positive_class = [1, -5, 4, 6.5, -3, -2.5, 0, 2]
result = self.tcav.compute_tcav_score(dir_deriv_positive_class)
self.assertAlmostEqual(0.5, result)
def test_get_trained_cav(self):
# 1D inputs.
x = [[1], [1], [1], [2], [1], [1], [-1], [-1], [-2], [-1], [-1]]
y = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
cav, accuracy = self.tcav.get_trained_cav(x, y, 0.33, random_state=0)
np.testing.assert_almost_equal(np.array([[19.08396947]]), cav)
self.assertAlmostEqual(1.0, accuracy)
# 2D inputs.
x = [[-8, 1], [5, 3], [3, 6], [-2, 5], [-8, 10], [10, -5]]
y = [1, 0, 0, 1, 1, 0]
cav, accuracy = self.tcav.get_trained_cav(x, y, 0.33, random_state=0)
np.testing.assert_almost_equal(np.array([[-77.89678676, 9.73709834]]), cav)
self.assertAlmostEqual(1.0, accuracy)
def test_compute_local_scores(self):
cav = np.array([[0, 1]])
dataset_outputs = [
{
'probas': [0.2, 0.8],
'cls_emb': [5, 12]
},
{
'probas': [0.6, 0.4],
'cls_emb': [3, 4]
}
]
cos_sim, dot_prods = self.tcav.compute_local_scores(
cav, dataset_outputs, 'cls_emb')
self.assertListEqual([12, 4], dot_prods)
# Magnitude of cav is 1, magnitude of cls_embs are [13, 5].
# Cosine similarity is dot / (cav_mag * cls_embs_mag),
# which is [12/13, 4/5].
self.assertListEqual([0.9230769230769231, 0.8], cos_sim)
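# Hedged sketch (added for illustration, not part of the original test): the
# cosine-similarity arithmetic described in the comment above, worked out
# directly with numpy. 'np' is assumed to be numpy, already imported in this file.
manual_cav = np.array([0.0, 1.0])
manual_emb = np.array([5.0, 12.0])
manual_dot = float(np.dot(manual_cav, manual_emb))  # dot product = 12
manual_cos = manual_dot / (np.linalg.norm(manual_cav) * np.linalg.norm(manual_emb))  # 12 / (1 * 13)
self.assertAlmostEqual(12.0 / 13.0, manual_cos)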
cav = np.array([[1, 2, 3]])
dataset_outputs = [
{
'probas': [0.2, 0.8],
'cls_emb': [3, 2, 1]
},
{
'probas': [0.6, 0.4],
'cls_emb': [1, 2, 0]
}
]
cos_sim, dot_prods = self.tcav.compute_local_scores(
cav, dataset_outputs, 'cls_emb')
self.assertListEqual([10, 5], dot_prods)
self.assertListEqual([0.7142857142857143, 0.5976143046671968],
cos_sim)
def test_get_dir_derivs(self):
cav = np.array([[1, 2, 3]])
dataset_outputs = [
{
'probas': [0.2, 0.8],
'cls_grad': [3, 2, 1],
'grad_class': '1'
},
{
'probas': [0.6, 0.4],
'cls_grad': [1, 2, 0],
'grad_class': '0'
}
]
# Example where only the first output is in class_to_explain 1.
dir_derivs = self.tcav.get_dir_derivs(
cav, dataset_outputs, 'cls_grad', 'grad_class',
class_to_explain='1')
self.assertListEqual([10], dir_derivs)
from .toornament_connection import SyncToornamentConnection, AsyncToornamentConnection
from .viewer_schemas import *
from typing import Optional
from .range import Range
class SyncViewerAPI(SyncToornamentConnection):
@staticmethod
def _base_url():
return 'https://api.toornament.com/viewer/v2'
def get_match(self, tournament_id, id):
"""Retrieve a single match of a tournament.
Returns a match with all its games and opponents. In ffa matches only the first four opponents are included in each match game.
:param tournament_id: The id of the tournament you want to retrieve data about.
:param id: The id of the match to retrieve."""
tournament_id = str(tournament_id)
id = str(id)
method = 'GET'
path = '/tournaments/{tournament_id}/matches/{id}'
path_mapping = {
'tournament_id': tournament_id,
'id': id,
}
query_parameters = {
}
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return MatchDetailed(**content)
def get_matches_from_tournament(self, tournament_id, *, range: Range, stage_ids: Optional[list] = None,
stage_numbers: Optional[list] = None, group_ids: Optional[list] = None,
group_numbers: Optional[list] = None, round_ids: Optional[list] = None,
round_numbers: Optional[list] = None, statuses: Optional[list] = None,
is_scheduled: Optional[bool] = None, scheduled_before: Optional[str] = None,
scheduled_after: Optional[str] = None, participant_ids: Optional[list] = None,
sort: Optional[str] = None):
"""Retrieve matches of a tournament.
Returns the matches of a tournament. In ffa matches only the first four opponents are included in each match.
:param range: A range of requested items using the 'matches' unit. The size of the range can not exceed 128. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param tournament_id: The id of the tournament you want to retrieve data about.
:param stage_ids: One or several stage ids to filter.
:param stage_numbers: One or several stage numbers to filter.
:param group_ids: One or several group ids to filter.
:param group_numbers: One or several group numbers to filter.
:param round_ids: One or several round ids to filter.
:param round_numbers: One or several round numbers to filter.
:param statuses: One or several match statuses to filter.
:param is_scheduled: Whether to include scheduled matches.
:param scheduled_before: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled before or at the datetime.
:param scheduled_after: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled after or at the datetime
:param participant_ids: One or several participant ids involved in the matches to filter.
:param sort: A method to sort the filtered data. "structure" sorts using the stage, group, round and match numbers. "schedule" sorts using the scheduled date. "latest results" sorts using the date at which the matches were played (not scheduled)."""
tournament_id = str(tournament_id)
stage_ids = [str(e) for e in stage_ids] if stage_ids else stage_ids
group_ids = [str(e) for e in group_ids] if group_ids else group_ids
round_ids = [str(e) for e in round_ids] if round_ids else round_ids
participant_ids = [str(e) for e in participant_ids] if participant_ids else participant_ids
method = 'GET'
path = '/tournaments/{tournament_id}/matches'
path_mapping = {
'tournament_id': tournament_id,
}
query_parameters = {
}
if stage_ids:
query_parameters['stage_ids'] = stage_ids
if stage_numbers:
query_parameters['stage_numbers'] = stage_numbers
if group_ids:
query_parameters['group_ids'] = group_ids
if group_numbers:
query_parameters['group_numbers'] = group_numbers
if round_ids:
query_parameters['round_ids'] = round_ids
if round_numbers:
query_parameters['round_numbers'] = round_numbers
if statuses:
query_parameters['statuses'] = statuses
if is_scheduled:
query_parameters['is_scheduled'] = is_scheduled
if scheduled_before:
query_parameters['scheduled_before'] = scheduled_before
if scheduled_after:
query_parameters['scheduled_after'] = scheduled_after
if participant_ids:
query_parameters['participant_ids'] = participant_ids
if sort:
query_parameters['sort'] = sort
if not range.unit:
range.unit = 'matches'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [Match(**match) for match in content]
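# Usage sketch (illustrative only, not part of the wrapper). It assumes the
# Range helper takes start/end indices and that SyncViewerAPI is constructed
# with whatever credentials _simple_access() expects; the ids are made up.
#
#   api = SyncViewerAPI(...)
#   matches = api.get_matches_from_tournament(
#       "378426939508809728",            # hypothetical tournament id
#       range=Range(0, 49),              # first 50 matches
#       sort="schedule")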
def get_matches_from_discipline(self, discipline_id, *, range: Range, is_featured: Optional[bool] = None,
statuses: Optional[list] = None, scheduled_before: Optional[str] = None,
scheduled_after: Optional[str] = None, participant_ids: Optional[list] = None,
tournament_ids: Optional[list] = None, sort: Optional[str] = None):
"""Retrieve matches of a discipline, regardless of their tournament.
Returns matches of a discipline. In ffa matches only the first four opponents are included in each match game.
:param range: A range of requested items using the 'matches' unit. The size of the range can not exceed 128. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param discipline_id: The string id of the discipline.
:param is_featured: Whether to include featured tournaments.
:param statuses: One or several match statuses to filter.
:param scheduled_before: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled before or at the datetime.
:param scheduled_after: A datetime in RFC 3339 format (combined date, time and utc offset), to include all matches scheduled after or at the datetime
:param participant_ids: One or several participant ids involved in the matches to filter.
:param tournament_ids: List of tournament IDs to filter the data with.
:param sort: A method to sort the filtered data. "structure" sorts using the stage, group, round and match numbers. "schedule" sorts using the scheduled date. "latest results" sorts using the date at which the matches were played (not scheduled)."""
discipline_id = str(discipline_id)
participant_ids = [str(e) for e in participant_ids] if participant_ids else participant_ids
tournament_ids = [str(e) for e in tournament_ids] if tournament_ids else tournament_ids
method = 'GET'
path = '/disciplines/{discipline_id}/matches'
path_mapping = {
'discipline_id': discipline_id,
}
query_parameters = {
}
if is_featured:
query_parameters['is_featured'] = is_featured
if statuses:
query_parameters['statuses'] = statuses
if scheduled_before:
query_parameters['scheduled_before'] = scheduled_before
if scheduled_after:
query_parameters['scheduled_after'] = scheduled_after
if participant_ids:
query_parameters['participant_ids'] = participant_ids
if tournament_ids:
query_parameters['tournament_ids'] = tournament_ids
if sort:
query_parameters['sort'] = sort
if not range.unit:
range.unit = 'matches'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [MatchDiscipline(**match) for match in content]
def get_bracket_nodes(self, tournament_id, stage_id, *, range: Range, group_ids: Optional[list] = None,
group_numbers: Optional[list] = None, round_ids: Optional[list] = None,
round_numbers: Optional[list] = None, min_depth: Optional[int] = None,
max_depth: Optional[int] = None):
"""Retrieve bracket nodes of a stage and tournament.
Returns the bracket nodes of a stage. A bracket node represents a match and some extra data.
:param range: A range of requested items using the 'nodes' unit. The size of the range can not exceed 128. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param tournament_id: The id of the tournament you want to retrieve data about.
:param stage_id: The id of the stage you want to retrieve data about.
:param group_ids: A list of group ids to filter.
:param group_numbers: A list of group numbers to filter.
:param round_ids: A list of round ids to filter.
:param round_numbers: A list of round numbers to filter.
:param min_depth: A minimum depth to filter.
:param max_depth: A maximal depth to filter."""
tournament_id = str(tournament_id)
stage_id = str(stage_id)
group_ids = [str(e) for e in group_ids] if group_ids else group_ids
round_ids = [str(e) for e in round_ids] if round_ids else round_ids
method = 'GET'
path = '/tournaments/{tournament_id}/stages/{stage_id}/bracket-nodes'
path_mapping = {
'tournament_id': tournament_id,
'stage_id': stage_id,
}
query_parameters = {
}
if group_ids:
query_parameters['group_ids'] = group_ids
if group_numbers:
query_parameters['group_numbers'] = group_numbers
if round_ids:
query_parameters['round_ids'] = round_ids
if round_numbers:
query_parameters['round_numbers'] = round_numbers
if min_depth:
query_parameters['min_depth'] = min_depth
if max_depth:
query_parameters['max_depth'] = max_depth
if not range.unit:
range.unit = 'nodes'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [BracketNode(**node) for node in content]
def get_custom_fields(self, tournament_id, *, target_type: Optional[str] = None):
"""Retrieves custom fields of a tournament.
Returns the complete definition of all custom fields for a given tournament. This includes both public and private custom fields.
A custom field may be associated to a player, a team or a team's player. For more information, please read the [Custom Fields](https://developer.toornament.com/v2/core-concepts/custom-fields) documentation.
:param tournament_id: The id of the tournament you want to retrieve data about.
:param target_type: The entity affected by the custom fields."""
tournament_id = str(tournament_id)
method = 'GET'
path = '/tournaments/{tournament_id}/custom-fields'
path_mapping = {
'tournament_id': tournament_id,
}
query_parameters = {
}
if target_type:
query_parameters['target_type'] = target_type
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [CustomField(**field) for field in content]
def get_disciplines(self, *, range: Range):
"""Retrieve the list of available | |
# repository: astroumd/admit
#! /usr/bin/env casarun
#
#
# admit1.py : an example ADMIT pipeline/flow for line cubes with an optional continuum map
#
# Usage: $ADMIT/admit/test/admit1.py [line.fits [alias]] [cont.fits]
#
# this will create a line.admit directory with all the
# BDP's and associated data products inside. Linecubes will
# have their own directory with their associates products
# within that directory.
# Optionally you can give a continuum image, cont.fits, but it
# is currently not used for much.
#
# if you give an admit directory, it will try and re-run in that directory.
# although it is suggested to use admit0.py as a template, edit this,
# and re-run ADMIT to compute different instances of the flow
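#
# Example invocation (hypothetical file names), assuming CASA and ADMIT are
# installed and $ADMIT is set:
#
#   $ADMIT/admit/test/admit1.py ngc253_co21.fits n253 ngc253_cont.fits
#
# which would write its products into an 'ngc253_co21.admit' directory.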
version = '30-jul-2015'
# ===>>> set some parameters for this run <<<=================================================================
file = 'foobar.fits' # the default FITS input name to be ingested
cont = '' # add this continuum fits file to the ingestion process?
useMask = True # use a mask where fits data == 0.0
alias = '' # use a short alias instead of the possibly long basename?
vlsr = None # either set it below, or make get_vlsr() to work (else vlsr=0 will be used)
maxpos = [] # default to the peak in the cube for CubeSpectrum
robust = ('hin',1.5) # default hinges-fences
clean = True # clean up the previous admit tree, i.e. no re-running
plotmode = 0 # 0=batch 1=interactive 2=interactive at the very end of all plots
plottype = 'png' # jpg, png, svg, ....(standard matplotlib options)
loglevel = 15 # 10=DEBUG, 15=TIMING 20=INFO 30=WARNING 40=ERROR 50=FATAL
usePeak = True # LineCubeSpectra through peak of a Moment0 map? (else pos[] or SpwCube Peak)
useCSM = False # if usePeak, should CubeSum (CSM) be used (instead of mom0 from LineCube)
pvslice = [] # PV Slice (x0,y0,x1,y1)
pvslit = [] # PV Slice (xc,yc,len,pa) if none given, it will try and find out
useUID = False # True = LineUID is more robust, but no real lineID
usePPP = True # create and use PeakPointPlot?
useMOM = True # if no lines are found, do a MOM0,1,2 anyways ?
pvSmooth = [10,10] # smooth the PVslice ? Pos or Pos,Vel pixel numbers
# =================================================================================================================
# python system modules
import sys, os, math
# essential admit modules
from admit.AT import AT
import admit.Admit as admit
import admit.util.bdp_types as bt
# AT's we need (we normally don't need BDP's in user code such as this)
from admit.at.Ingest_AT import Ingest_AT
from admit.at.CubeStats_AT import CubeStats_AT
from admit.at.CubeSpectrum_AT import CubeSpectrum_AT
from admit.at.CubeSum_AT import CubeSum_AT
from admit.at.FeatureList_AT import FeatureList_AT
from admit.at.LineID_AT import LineID_AT
from admit.at.LineUID_AT import LineUID_AT
from admit.at.LineCube_AT import LineCube_AT
from admit.at.Moment_AT import Moment_AT
from admit.at.PVSlice_AT import PVSlice_AT
from admit.at.PVCorr_AT import PVCorr_AT
# Example how to get predefined position(s) for CubeSpectrum
def get_pos(i):
""" return key positions in N253 (1..10) from Meier's Table 2:
0 = blank, if you want to use the peak in the cube
11 = map center, the reference position of N253
See also http://adsabs.harvard.edu/abs/2015ApJ...801...63M
"""
pos = [ [], # 0 = blank
['00h47m33.041s', '-25d17m26.61s' ], # pos 1
['00h47m32.290s', '-25d17m19.10s' ], # 2
['00h47m31.936s', '-25d17m29.10s' ], # 3
['00h47m32.792s', '-25d17m21.10s' ], # 4
['00h47m32.969s', '-25d17m19.50s' ], # 5
['00h47m33.159s', '-25d17m17.41s' ], # 6
['00h47m33.323s', '-25d17m15.50s' ], # 7
['00h47m33.647s', '-25d17m13.10s' ], # 8
['00h47m33.942s', '-25d17m11.10s' ], # 9
['00h47m34.148s', '-25d17m12.30s' ], # pos 10
['00h47m33.100s', '-25d17m17.50s' ], # map reference
]
return pos[i]
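# Usage sketch (illustrative): to force CubeSpectrum through Meier position 3
# instead of the cube peak, one could set, before the flow is built,
#   maxpos = get_pos(3)   # ['00h47m31.936s', '-25d17m29.10s']
# while get_pos(0) returns an empty list, i.e. keep the default cube peak.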
# placeholder until we have an official way to find the VLSR of a source
def get_vlsr(source, vlsr=None):
"""simple vlsr (by source) lookup
"""
vcat = { 'NGC253' : 236.0,
'NGC3256' : 2794.2, # test0 (foobar.fits)
'NGC6503' : 25.0,
'185' : 4.0, # test201
'188' : 4.0, # test202
'Serpens_Main': 8.0,
'SERPSNW' : 8.0,
'L1551 NE' : 7.0, # test1 (should be 6.9 ???)
'ST11' : 280.0, # in the LMC, but not sure if the VLSR is close enough
}
# if it was given, return that
if vlsr != None : return vlsr
# if it was in our cute catalog, return that
if vcat.has_key(source): return vcat[source]
# if all else fails, return 0, or go hunt down NED or SIMBAD (astroquery?)
print "GET_VLSR: unknown source %s, using vlsr=0.0" % source
print "Known sources are: ",vcat.keys()
return 0.0
def admit_dir(file):
""" create the admit directory name from a filename
This filename can be a FITS file (usually with a .fits extension)
or a directory, which would be assumed to be a CASA image or
a MIRIAD image
"""
loc = file.rfind('.')
ext = '.admit'
if loc < 0:
return file + ext
else:
if file[loc:] == ext:
print "Warning: assuming a re-run on existing ",file
return file
return file[:loc] + ext
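# For example (illustrative names): admit_dir('ngc253.fits') returns
# 'ngc253.admit', admit_dir('ngc253.admit') warns and returns it unchanged,
# and admit_dir('mycube') (no extension) returns 'mycube.admit'.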
def get_admit_vars(module_name):
module = globals().get(module_name, None)
book = {}
if module:
book = {key: value for key, value in module.__dict__.iteritems() if not (key.startswith('__') or key.startswith('_'))}
return book
# Before command line parsing, attempt to find 'admit_vars.py' with variables to override the admit vars here
# this doesn't work yet, since CASA modifies the python environment
try:
print 'Trying admit_vars'
import admit_vars
book = get_admit_vars('admit_vars')
for key,val in book.iteritems():
# print "ADMIT_VAR: ",key,val,type(key),type(val)
exec(key + '=' + repr(val))
except:
print "No admit_vars.py found, and that's ok."
# allow a command line argument to be the fits file name
argv = admit.casa_argv(sys.argv)
if len(argv) > 1:
file = argv[1]
alias = ""
if len(argv) > 2:
file = argv[1]
alias = argv[2]
if len(argv) > 3:
file = argv[1]
alias = argv[2]
cont = argv[3]
#-----------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------- start of script -----------------------------------------------
# announce version
print 'ADMIT1: Version ',version
# do the work in a proper ".admit" directory
adir = admit_dir(file)
# dirty method, it really should check if adir is an admit directory
if clean and adir != file:
print "Removing previous results from ",adir
os.system('rm -rf %s' % adir)
create=True
else:
create=False
a = admit.Admit(adir,name='Testing ADMIT1 style pipeline - version %s' % version,create=create,loglevel=loglevel)
if a.new:
print "Starting a new ADMIT using ",argv[0]
os.system('cp -a %s %s' % (argv[0],adir))
else:
print "All done, we just read an existing admit.xml and it should do nothing"
print "Use admit0.py to re-run inside of your admit directory"
#
a.show()
a.showsetkey()
sys.exit(0)
# Default ADMIT plotting environment
a.plotmode(plotmode,plottype)
# Ingest
ingest1 = a.addtask(Ingest_AT(file=file,alias=alias))
a[ingest1].setkey('mask',useMask)
bandcube1 = (ingest1,0)
if cont != '':
ingest2 = a.addtask(Ingest_AT(file=cont,alias=alias+'-cont'))
contmap = (ingest2,0)
#
cubestats2 = a.addtask(CubeStats_AT(), [contmap])
#a[cubestats1].setkey('robust',robust)
csttab2 = (cubestats2,0)
featurelist2 = a.addtask(FeatureList_AT(), [contmap])
# CubeStats - will also do log(Noise),log(Peak) plot
cubestats1 = a.addtask(CubeStats_AT(), [bandcube1])
a[cubestats1].setkey('robust',robust)
a[cubestats1].setkey('ppp',usePPP)
csttab1 = (cubestats1,0)
# CubeSum
moment1 = a.addtask(CubeSum_AT(), [bandcube1,csttab1])
a[moment1].setkey('numsigma',2.0) # Nsigma clip in cube
a[moment1].setkey('sigma',99.9) # force single cuberms sigma from cubestats
csmom0 = (moment1,0)
# CubeSpectrum
if len(maxpos) > 0:
cubespectrum1 = a.addtask(CubeSpectrum_AT(), [bandcube1])
a[cubespectrum1].setkey('pos',maxpos)
else:
#cubespectrum1 = a.addtask(CubeSpectrum_AT(), [bandcube1,csttab1])
cubespectrum1 = a.addtask(CubeSpectrum_AT(), [bandcube1,csmom0])
csptab1 = (cubespectrum1,0)
# PVSlice
if len(pvslice) == 4:
# hardcoded with a start,end
slice1 = a.addtask(PVSlice_AT(slice=pvslice,width=11),[bandcube1])
elif len(pvslit) == 4:
# hardcoded with a center,len,pa
slice1 = a.addtask(PVSlice_AT(slit=pvslit,width=11),[bandcube1])
else:
# use cubesum's map to generate the best slice
# the PPP method didn't seem to work so well yet, and is slow, so still commented out here
#slice1 = a.addtask(PVSlice_AT(width=5),[bandcube1,csttab1])
slice1 = a.addtask(PVSlice_AT(width=5),[bandcube1,csmom0])
a[slice1].setkey('clip',0.3) # TODO: this is an absolute number for mom0
a[slice1].setkey('gamma',1.0) # SB185 works better with gamma=4
a[slice1].setkey('smooth',pvSmooth) # smooth, in pixel numbers
pvslice1 = (slice1,0)
corr1 = a.addtask(PVCorr_AT(),[pvslice1,csttab1])
pvcorr1 = (corr1,0)
a.run() # run now, LineID_AT needs the VLSR here
a.write()
print "OBJECT 1:", a.summaryData.get('object')
vlsr = get_vlsr(a.summaryData.get('object')[0].getValue()[0],vlsr)
print "VLSR = ",vlsr
if useUID:
lineid4 = a.addtask(LineUID_AT(vlsr=vlsr,method=2), [csttab1])
a[lineid4].setkey('pmin',4.0)
a[lineid4].setkey('minchan',6)
a[lineid4].setkey('maxgap',5) # 20 for the SB outflows
a[lineid4].setkey('bottom',True)
lltab1 = (lineid4,0)
else:
lineid1 = a.addtask(LineID_AT(vlsr=vlsr,segment="ADMIT"), [csptab1,csttab1])
lltab1 = (lineid1,0)
a.run() # run now, we need nlines
a.write()
nlines = len(a[lltab1[0]][0])
print "nlines=",nlines
# LineCube
linecube1 = a.addtask(LineCube_AT(), [bandcube1,lltab1])
a.run() # run again, we need to find out how many cubes created
a.write()
nlines = len(a[linecube1])
print "Found %d lines during runtime" % nlines
x = range(nlines) # place holder to contain mol/line
m = {} # task id for moment on this mol/line
sp= {} # task id for cubespectrum on this mol/line
st= {} # task id for cubestats
# loop over all lines ; produce linecubes, moments and spectra
for i in range(nlines):
x[i] = a[linecube1][i].getimagefile(bt.CASA)
print "LineDir:", i, x[i]
# Moment maps from the LineCube
linecubei = (linecube1,i)
m[x[i]] = a.addtask(Moment_AT(),[linecubei,csttab1])
print "MOMENT_AT:",m[x[i]]
a[m[x[i]]].setkey('moments',[0,1,2])
#a[m[x[i]]].setkey('cutoff',[2.0,3.0,3.0])
a[m[x[i]]].setkey('numsigma',[2.0])
a[m[x[i]]].setkey('mom0clip',2.0) # TODO: this is still an absolute value
if usePeak:
if useCSM:
momenti0 = csmom0 # CubeSum mom0
else:
momenti0 = (m[x[i]],0) # this linecube mom0
# CubeSpectrum through the line cube where the | |
# repository: LittleNed/toontown-stride, file: toontown/questscripts/TTQUESTS.py
# NOTE: \a is the delimiter for chat pages
# Quest ids can be found in Quests.py
SCRIPT = '''
ID reward_100
SHOW laffMeter
LERP_POS laffMeter 0 0 0 1
LERP_SCALE laffMeter 0.2 0.2 0.2 1
WAIT 1.5
ADD_LAFFMETER 1
WAIT 1
LERP_POS laffMeter -1.18 0 -0.87 1
LERP_SCALE laffMeter 0.075 0.075 0.075 1
WAIT 1
FINISH_QUEST_MOVIE
# TUTORIAL
ID tutorial_mickey
LOAD_SFX soundRun "phase_3.5/audio/sfx/AV_footstep_runloop.ogg"
LOAD_CC_DIALOGUE mickeyTutorialDialogue_1 "phase_3/audio/dial/CC_%s_tutorial02.ogg"
LOAD_CC_DIALOGUE mickeyTutorialDialogue_2 "phase_3.5/audio/dial/CC_tom_tutorial_%s01.ogg"
LOAD_CC_DIALOGUE mickeyTutorialDialogue_3a "phase_3/audio/dial/CC_%s_tutorial03.ogg"
LOAD_CC_DIALOGUE mickeyTutorialDialogue_3b "phase_3/audio/dial/CC_%s_tutorial05.ogg"
LOAD_DIALOGUE mickeyTutorialDialogue_4 "phase_3.5/audio/dial/CC_tom_tutorial_mickey02.ogg"
LOCK_LOCALTOON
REPARENTTO camera render
POSHPRSCALE camera 11 7 3 52 0 0 1 1 1
LOAD_CLASSIC_CHAR classicChar
REPARENTTO classicChar render
POS classicChar 0 0 0
HPR classicChar 0 0 0
POS localToon 0 0 0
HPR localToon 0 0 0
WAIT 2
PLAY_SFX soundRun 1
LOOP_ANIM classicChar "run"
LOOP_ANIM localToon "run"
LERP_POS localToon -1.8 14.4 0 2
LERP_POS classicChar 0 17 0 2
WAIT 2
#LERP_HPR localToon -110 0 0 0.5
LERP_HPR localToon -70 0 0 0.5
LERP_HPR classicChar -120 0 0 0.5
WAIT 0.5
STOP_SFX soundRun
LOOP_ANIM localToon "neutral"
PLAY_ANIM classicChar "left-point-start" 1
WAIT 1.63
LOOP_ANIM classicChar "left-point"
CC_CHAT_CONFIRM classicChar "QuestScriptTutorial%s_1" mickeyTutorialDialogue_1
PLAY_ANIM classicChar "left-point-start" -1.5
WAIT 1.0867
LOOP_ANIM classicChar "neutral"
CC_CHAT_TO_CONFIRM npc classicChar "QuestScriptTutorial%s_2" "CFSpeech" mickeyTutorialDialogue_2
PLAY_ANIM classicChar "right-point-start" 1
WAIT 1.0867
LOOP_ANIM classicChar "right-point"
CC_CHAT_CONFIRM classicChar "QuestScriptTutorial%s_3" mickeyTutorialDialogue_3a mickeyTutorialDialogue_3b
PLAY_SFX soundRun 1
LOOP_ANIM classicChar "run"
LERP_HPR classicChar -180 0 0 0.5
WAIT 0.5
LERP_POS classicChar 0 0 0 2
WAIT 2
STOP_SFX soundRun
REPARENTTO classicChar hidden
UNLOAD_CHAR classicChar
#CHAT npc QuestScriptTutorialMickey_4 mickeyTutorialDialogue_4
REPARENTTO camera localToon
POS localToon 1.6 9.8 0
HPR localToon 14 0 0
FREE_LOCALTOON
LOCAL_CHAT_PERSIST npc QuestScriptTutorialMickey_4 mickeyTutorialDialogue_4
ID quest_assign_101
CLEAR_CHAT npc
LOAD squirt1 "phase_3.5/models/gui/tutorial_gui" "squirt1"
LOAD squirt2 "phase_3.5/models/gui/tutorial_gui" "squirt2"
LOAD toonBuilding "phase_3.5/models/gui/tutorial_gui" "toon_buildings"
LOAD cogBuilding "phase_3.5/models/gui/tutorial_gui" "suit_buildings"
LOAD cogs "phase_3.5/models/gui/tutorial_gui" "suits"
LOAD tart "phase_3.5/models/props/tart"
LOAD flower "phase_3.5/models/props/squirting-flower"
LOAD_DIALOGUE tomDialogue_01 "phase_3.5/audio/dial/CC_tom_tutorial_questscript01.ogg"
LOAD_DIALOGUE tomDialogue_02 "phase_3.5/audio/dial/CC_tom_tutorial_questscript03.ogg"
LOAD_DIALOGUE tomDialogue_03 "phase_3.5/audio/dial/CC_tom_tutorial_questscript04.ogg"
LOAD_DIALOGUE tomDialogue_04 "phase_3.5/audio/dial/CC_tom_tutorial_questscript05.ogg"
LOAD_DIALOGUE tomDialogue_05 "phase_3.5/audio/dial/CC_tom_tutorial_questscript06.ogg"
LOAD_DIALOGUE tomDialogue_06 "phase_3.5/audio/dial/CC_tom_tutorial_questscript07.ogg"
LOAD_DIALOGUE tomDialogue_07 "phase_3.5/audio/dial/CC_tom_tutorial_questscript08.ogg"
LOAD_DIALOGUE tomDialogue_08 "phase_3.5/audio/dial/CC_tom_tutorial_questscript09.ogg"
LOAD_DIALOGUE tomDialogue_09 "phase_3.5/audio/dial/CC_tom_tutorial_questscript10.ogg"
LOAD_DIALOGUE tomDialogue_10 "phase_3.5/audio/dial/CC_tom_tutorial_questscript11.ogg"
LOAD_DIALOGUE tomDialogue_11 "phase_3.5/audio/dial/CC_tom_tutorial_questscript12.ogg"
LOAD_DIALOGUE tomDialogue_12 "phase_3.5/audio/dial/CC_tom_tutorial_questscript13.ogg"
LOAD_DIALOGUE tomDialogue_13 "phase_3.5/audio/dial/CC_tom_tutorial_questscript14.ogg"
LOAD_DIALOGUE tomDialogue_14 "phase_3.5/audio/dial/CC_tom_tutorial_questscript16.ogg"
POSHPRSCALE cogs -1.05 7 0 0 0 0 1 1 1
POSHPRSCALE toonBuilding -1.05 7 0 0 0 0 1 1 1
POSHPRSCALE cogBuilding -1.05 7 0 0 0 0 1 1 1
POSHPRSCALE squirt1 -1.05 7 0 0 0 0 1 1 1
POSHPRSCALE squirt2 -1.05 7 0 0 0 0 1 1 1
REPARENTTO camera npc
POS camera -2.2 5.2 3.3
HPR camera 215 5 0
WRTREPARENTTO camera localToon
PLAY_ANIM npc "right-hand-start" 1
WAIT 1
REPARENTTO cogs aspect2d
LERP_SCALE cogs 1 1 1 0.5
WAIT 1.0833
LOOP_ANIM npc "right-hand" 1
FUNCTION npc "angryEyes"
FUNCTION npc "blinkEyes"
LOCAL_CHAT_CONFIRM npc QuestScript101_1 "CFSpeech" tomDialogue_01
LOCAL_CHAT_CONFIRM npc QuestScript101_2 "CFSpeech" tomDialogue_02
REPARENTTO cogs hidden
REPARENTTO toonBuilding camera
LOCAL_CHAT_CONFIRM npc QuestScript101_3 "CFSpeech" tomDialogue_03
REPARENTTO toonBuilding hidden
REPARENTTO cogBuilding camera
FUNCTION npc "sadEyes"
FUNCTION npc "blinkEyes"
LOCAL_CHAT_CONFIRM npc QuestScript101_4 "CFSpeech" tomDialogue_04
REPARENTTO cogBuilding hidden
REPARENTTO squirt1 camera
FUNCTION npc "normalEyes"
FUNCTION npc "blinkEyes"
LOCAL_CHAT_CONFIRM npc QuestScript101_5 "CFSpeech" tomDialogue_05
REPARENTTO squirt1 hidden
REPARENTTO squirt2 camera
LOCAL_CHAT_CONFIRM npc QuestScript101_6 "CFSpeech" tomDialogue_06
PLAY_ANIM npc 'right-hand-start' -1.8
LERP_SCALE squirt2 1 1 0.01 0.5
WAIT 0.5
REPARENTTO squirt2 hidden
WAIT 0.6574
LOOP_ANIM npc 'neutral' 1
LOCAL_CHAT_CONFIRM npc QuestScript101_7 "CFSpeech" tomDialogue_07
# Make it look like the client has no inventory. Since the toon.dc
# specifies that the user really does have 1 of each item, we will
# just put on a show for the client of not having any items then
# handing them out.
SET_INVENTORY 4 0 0
SET_INVENTORY 5 0 0
REPARENTTO inventory camera
SHOW inventory
SET_INVENTORY_DETAIL -1
POSHPRSCALE inventory -0.77 7.42 1.11 0 0 0 0.01 0.01 0.01
SET_INVENTORY_YPOS 4 0 -0.1
SET_INVENTORY_YPOS 5 0 -0.1
LERP_SCALE inventory 3 0.01 3 1
WAIT 1
REPARENTTO flower npc "**/1000/**/def_joint_right_hold"
POSHPRSCALE flower 0.10 -0.14 0.20 180.00 287.10 168.69 0.70 0.70 0.70
PLAY_ANIM npc 'right-hand-start' 1.8
WAIT 1.1574
LOOP_ANIM npc 'right-hand' 1.1
WAIT 0.8
WRTREPARENTTO flower camera
LERP_POSHPRSCALE flower -1.75 4.77 0.00 30.00 180.00 16.39 0.75 0.75 0.75 0.589
WAIT 1.094
LERP_POSHPRSCALE flower -1.76 7.42 -0.63 179.96 -89.9 -153.43 0.12 0.12 0.12 1
PLAY_ANIM npc 'right-hand-start' -1.5
WAIT 1
ADD_INVENTORY 5 0 1
POSHPRSCALE inventory -0.77 7.42 1.11 0 0 0 3 0.01 3
REPARENTTO flower hidden
REPARENTTO tart npc "**/1000/**/def_joint_right_hold"
POSHPRSCALE tart 0.19 0.02 0.00 0.00 0.00 349.38 0.34 0.34 0.34
PLAY_ANIM npc 'right-hand-start' 1.8
WAIT 1.1574
LOOP_ANIM npc 'right-hand' 1.1
WAIT 0.8
WRTREPARENTTO tart camera
LERP_POSHPRSCALE tart -1.37 4.56 0 329.53 39.81 346.76 0.6 0.6 0.6 0.589
WAIT 1.094
LERP_POSHPRSCALE tart -1.66 7.42 -0.36 0 30 30 0.12 0.12 0.12 1.0
PLAY_ANIM npc 'right-hand-start' -1.5
WAIT 1
ADD_INVENTORY 4 0 1
POSHPRSCALE inventory -0.77 7.42 1.11 0 0 0 3 0.01 3
REPARENTTO tart hidden
#PLAY_ANIM npc 'neutral' 1
#WAIT 2.0833
PLAY_ANIM npc 'right-hand-start' 1
WAIT 1.0
HIDE inventory
REPARENTTO inventory hidden
SET_INVENTORY_YPOS 4 0 0
SET_INVENTORY_YPOS 5 0 0
SET_INVENTORY_DETAIL 0
POSHPRSCALE inventory 0 0 0 0 0 0 1 1 1
OBSCURE_LAFFMETER 0
SHOW laffMeter
POS laffMeter 0 0 0
SCALE laffMeter 0.075 0.075 0.075
LERP_POS laffMeter 1.7 0 0.87 1
LERP_SCALE laffMeter 0.2 0.2 0.2 0.6
WAIT 1.0833
LOOP_ANIM npc "right-hand"
LOCAL_CHAT_CONFIRM npc QuestScript101_8 "CFSpeech" tomDialogue_08
LOCAL_CHAT_CONFIRM npc QuestScript101_9 "CFSpeech" tomDialogue_09
FUNCTION npc "sadEyes"
FUNCTION npc "blinkEyes"
LAFFMETER 15 15
WAIT 0.1
LAFFMETER 14 15
WAIT 0.1
LAFFMETER 13 15
WAIT 0.1
LAFFMETER 12 15
WAIT 0.1
LAFFMETER 11 15
WAIT 0.1
LAFFMETER 10 15
WAIT 0.1
LAFFMETER 9 15
WAIT 0.1
LAFFMETER 8 15
WAIT 0.1
LAFFMETER 7 15
WAIT 0.1
LAFFMETER 6 15
WAIT 0.1
LAFFMETER 5 15
WAIT 0.1
LAFFMETER 4 15
WAIT 0.1
LAFFMETER 3 15
WAIT 0.1
LAFFMETER 2 15
WAIT 0.1
LAFFMETER 1 15
WAIT 0.1
LAFFMETER 0 15
LOCAL_CHAT_CONFIRM npc QuestScript101_10 "CFSpeech" tomDialogue_10
FUNCTION npc "normalEyes"
FUNCTION npc "blinkEyes"
LAFFMETER 15 15
WAIT 0.5
LERP_POS laffMeter 0.15 0.15 0.15 1
LERP_SCALE laffMeter 0.085 0.085 0.085 0.6
PLAY_ANIM npc "right-hand-start" -2
WAIT 1.0625
LOOP_ANIM npc "neutral"
WAIT 0.5
LERP_HPR npc -50 0 0 0.5
FUNCTION npc "surpriseEyes"
FUNCTION npc "showSurpriseMuzzle"
PLAY_ANIM npc "right-point-start" 1.5
WAIT 0.6944
LOOP_ANIM npc "right-point"
LOCAL_CHAT_CONFIRM npc QuestScript101_11 "CFSpeech" tomDialogue_11
LOCAL_CHAT_CONFIRM npc QuestScript101_12 "CFSpeech" tomDialogue_12
PLAY_ANIM npc "right-point-start" -1
LERP_HPR npc -0.068 0 0 0.75
WAIT 1.0417
FUNCTION npc "angryEyes"
FUNCTION npc "blinkEyes"
FUNCTION npc "hideSurpriseMuzzle"
LOOP_ANIM npc "neutral"
FUNCTION localToon "questPage.showQuestsOnscreenTutorial"
LOCAL_CHAT_CONFIRM npc QuestScript101_13 "CFSpeech" tomDialogue_13
FUNCTION localToon "questPage.hideQuestsOnscreenTutorial"
LOCAL_CHAT_CONFIRM npc QuestScript101_14 1 "CFSpeech" tomDialogue_14
FUNCTION npc "normalEyes"
FUNCTION npc "blinkEyes"
# Cleanup
UPON_TIMEOUT FUNCTION tart "removeNode"
UPON_TIMEOUT FUNCTION flower "removeNode"
UPON_TIMEOUT FUNCTION cogs "removeNode"
UPON_TIMEOUT FUNCTION toonBuilding "removeNode"
UPON_TIMEOUT FUNCTION cogBuilding "removeNode"
UPON_TIMEOUT FUNCTION squirt1 "removeNode"
UPON_TIMEOUT FUNCTION squirt2 "removeNode"
UPON_TIMEOUT LOOP_ANIM npc "neutral"
UPON_TIMEOUT HIDE inventory
UPON_TIMEOUT SET_INVENTORY_DETAIL 0
UPON_TIMEOUT SHOW laffMeter
UPON_TIMEOUT POS laffMeter 0.15 0.15 0.15
UPON_TIMEOUT SCALE laffMeter 0.085 0.085 0.085
UPON_TIMEOUT POSHPRSCALE inventory 0 0 0 0 0 0 1 1 1
POS localToon 0.776 14.6 0
HPR localToon 47.5 0 0
FINISH_QUEST_MOVIE
# TUTORIAL HQ HARRY
ID quest_incomplete_110
DEBUG "quest assign 110"
LOAD_DIALOGUE harryDialogue_01 "phase_3.5/audio/dial/CC_harry_tutorial_questscript01.ogg"
LOAD_DIALOGUE harryDialogue_02 "phase_3.5/audio/dial/CC_harry_tutorial_questscript02.ogg"
LOAD_DIALOGUE harryDialogue_03 "phase_3.5/audio/dial/CC_harry_tutorial_questscript03.ogg"
LOAD_DIALOGUE harryDialogue_04 "phase_3.5/audio/dial/CC_harry_tutorial_questscript04.ogg"
LOAD_DIALOGUE harryDialogue_05 "phase_3.5/audio/dial/CC_harry_tutorial_questscript05.ogg"
LOAD_DIALOGUE harryDialogue_06 "phase_3.5/audio/dial/CC_harry_tutorial_questscript06.ogg"
LOAD_DIALOGUE harryDialogue_07 "phase_3.5/audio/dial/CC_harry_tutorial_questscript07.ogg"
LOAD_DIALOGUE harryDialogue_08 "phase_3.5/audio/dial/CC_harry_tutorial_questscript08.ogg"
LOAD_DIALOGUE harryDialogue_09 "phase_3.5/audio/dial/CC_harry_tutorial_questscript09.ogg"
LOAD_DIALOGUE harryDialogue_10 "phase_3.5/audio/dial/CC_harry_tutorial_questscript10.ogg"
LOAD_DIALOGUE harryDialogue_11 "phase_3.5/audio/dial/CC_harry_tutorial_questscript11.ogg"
SET_MUSIC_VOLUME 0.4 activityMusic 0.5 0.7
LOCAL_CHAT_CONFIRM npc QuestScript110_1 harryDialogue_01
OBSCURE_BOOK 0
SHOW bookOpenButton
LOCAL_CHAT_CONFIRM npc QuestScript110_2 harryDialogue_02
# ARROWS_ON 0.92 -0.89 0 1.22 -0.64 90
ARROWS_ON 1.364477 -0.89 0 1.664477 -0.64 90
LOCAL_CHAT_PERSIST npc QuestScript110_3 harryDialogue_03
WAIT_EVENT "enterStickerBook"
ARROWS_OFF
SHOW_BOOK
HIDE bookPrevArrow
HIDE bookNextArrow
CLEAR_CHAT npc
WAIT 0.5
TOON_HEAD npc -0.2 -0.45 1
LOCAL_CHAT_CONFIRM npc QuestScript110_4 harryDialogue_04
ARROWS_ON 0.85 -0.75 -90 0.85 -0.75 -90
SHOW bookNextArrow
LOCAL_CHAT_PERSIST npc QuestScript110_5 harryDialogue_05
WAIT_EVENT "stickerBookPageChange-4"
HIDE bookPrevArrow
HIDE bookNextArrow
ARROWS_OFF
CLEAR_CHAT npc
WAIT 0.5
LOCAL_CHAT_CONFIRM npc QuestScript110_6 harryDialogue_06
ARROWS_ON 0.85 -0.75 -90 0.85 -0.75 -90
SHOW bookNextArrow
LOCAL_CHAT_PERSIST npc QuestScript110_7 harryDialogue_07
WAIT_EVENT "stickerBookPageChange-5"
HIDE bookNextArrow
HIDE bookPrevArrow
ARROWS_OFF
CLEAR_CHAT npc
LOCAL_CHAT_CONFIRM npc QuestScript110_8 harryDialogue_08
LOCAL_CHAT_CONFIRM npc QuestScript110_9 harryDialogue_09
LOCAL_CHAT_PERSIST npc QuestScript110_10 harryDialogue_10
ENABLE_CLOSE_BOOK
ARROWS_ON 1.364477 -0.89 0 1.664477 -0.64 90
WAIT_EVENT "exitStickerBook"
ARROWS_OFF
TOON_HEAD npc 0 0 0
HIDE_BOOK
HIDE bookOpenButton
LOCAL_CHAT_CONFIRM npc QuestScript110_11 1 harryDialogue_11
SET_MUSIC_VOLUME 0.7 activityMusic 1.0 0.4
# Lots of cleanup
UPON_TIMEOUT DEBUG "testing upon death"
UPON_TIMEOUT OBSCURE_BOOK 0
UPON_TIMEOUT ARROWS_OFF
UPON_TIMEOUT HIDE_BOOK
UPON_TIMEOUT COLOR_SCALE bookOpenButton 1 1 1 1
UPON_TIMEOUT TOON_HEAD npc 0 0 0
UPON_TIMEOUT SHOW bookOpenButton
FINISH_QUEST_MOVIE
# TUTORIAL FLIPPY
ID tutorial_blocker
LOAD_DIALOGUE blockerDialogue_01 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker01.ogg"
LOAD_DIALOGUE blockerDialogue_02 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker02.ogg"
LOAD_DIALOGUE blockerDialogue_03 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker03.ogg"
LOAD_DIALOGUE blockerDialogue_04 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker04.ogg"
LOAD_DIALOGUE blockerDialogue_05a "phase_3.5/audio/dial/CC_flippy_tutorial_blocker05.ogg"
LOAD_DIALOGUE blockerDialogue_05b "phase_3.5/audio/dial/CC_flippy_tutorial_blocker06.ogg"
LOAD_DIALOGUE blockerDialogue_06 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker07.ogg"
LOAD_DIALOGUE blockerDialogue_07 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker08.ogg"
LOAD_DIALOGUE blockerDialogue_08 "phase_3.5/audio/dial/CC_flippy_tutorial_blocker09.ogg"
HIDE localToon
REPARENTTO camera npc
FUNCTION npc "stopLookAround"
#POS camera 0.0 6.0 4.0
#HPR camera 180.0 0.0 0.0
LERP_POSHPR camera 0.0 6.0 4.0 180.0 0.0 0.0 0.5
SET_MUSIC_VOLUME 0.4 music 0.5 0.8
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_1 blockerDialogue_01
WAIT 0.8
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_2 blockerDialogue_02
WAIT 0.8
#POS camera -5.0 -9.0 6.0
#HPR camera -25.0 -10.0 0.0
LERP_POSHPR camera -5.0 -9.0 6.0 -25.0 -10.0 0.0 0.5
POS localToon 203.8 18.64 -0.475
HPR localToon -90.0 0.0 0.0
SHOW localToon
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_3 blockerDialogue_03
OBSCURE_CHAT 1 0 0
SHOW chatScButton
WAIT 0.6
ARROWS_ON -1.3644 0.91 180 -1.5644 0.74 -90
LOCAL_CHAT_PERSIST npc QuestScriptTutorialBlocker_4 blockerDialogue_04
WAIT_EVENT "enterSpeedChat"
ARROWS_OFF
BLACK_CAT_LISTEN 1
WAIT_EVENT "SCChatEvent"
BLACK_CAT_LISTEN 0
WAIT 0.5
CLEAR_CHAT localToon
REPARENTTO camera localToon
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_5 "CFSpeech" blockerDialogue_05a blockerDialogue_05b
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_6 "CFSpeech" blockerDialogue_06
OBSCURE_CHAT 0 0 0
SHOW chatNormalButton
WAIT 0.6
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_7 "CFSpeech" blockerDialogue_07
LOCAL_CHAT_CONFIRM npc QuestScriptTutorialBlocker_8 1 "CFSpeech" blockerDialogue_08
SET_MUSIC_VOLUME 0.8 music 1.0 0.4
LOOP_ANIM npc "walk"
LERP_HPR npc 270 0 0 0.5
WAIT 0.5
LOOP_ANIM npc "run"
LERP_POS npc 217.4 18.81 -0.475 0.75
LERP_HPR npc 240 0 0 0.75
WAIT 0.75
LERP_POS npc 222.4 15.0 -0.475 0.35
LERP_HPR npc 180 0 0 0.35
WAIT 0.35
LERP_POS npc 222.4 5.0 -0.475 0.75
WAIT 0.75
REPARENTTO npc hidden
FREE_LOCALTOON
UPON_TIMEOUT ARROWS_OFF
UPON_TIMEOUT OBSCURE_CHAT 0 0 0
UPON_TIMEOUT REPARENTTO camera localToon
FINISH_QUEST_MOVIE
ID quest_incomplete_120
CHAT_CONFIRM npc QuestScript120_1
# ANIM
CHAT_CONFIRM npc QuestScript120_2 1
FINISH_QUEST_MOVIE
ID quest_assign_121
CHAT_CONFIRM npc QuestScript121_1 1
FINISH_QUEST_MOVIE
ID quest_assign_130
CHAT_CONFIRM npc QuestScript130_1 1
FINISH_QUEST_MOVIE
ID quest_assign_131
CHAT_CONFIRM npc QuestScript131_1 1
FINISH_QUEST_MOVIE
ID quest_assign_140
CHAT_CONFIRM npc QuestScript140_1 1
FINISH_QUEST_MOVIE
ID quest_assign_141
CHAT_CONFIRM npc QuestScript141_1 1
FINISH_QUEST_MOVIE
# TUTORIAL COG
ID quest_incomplete_145
CHAT_CONFIRM npc QuestScript145_1 1
LOAD frame "phase_4/models/gui/tfa_images" "FrameBlankA"
LOAD tunnel "phase_4/models/gui/tfa_images" "tunnelSignA"
POSHPRSCALE tunnel 0 0 0 0 0 0 0.8 0.8 0.8
REPARENTTO tunnel frame
POSHPRSCALE frame 0 0 0 0 0 0 0.1 0.1 0.1
REPARENTTO frame aspect2d
LERP_SCALE frame 1.0 1.0 1.0 1.0
WAIT 3.0
LERP_SCALE frame 0.1 0.1 0.1 0.5
WAIT 0.5
REPARENTTO frame hidden
CHAT_CONFIRM npc QuestScript145_2 1
UPON_TIMEOUT FUNCTION frame "removeNode"
FINISH_QUEST_MOVIE
# TUTORIAL FRIENDS
ID quest_incomplete_150
CHAT_CONFIRM npc QuestScript150_1
ARROWS_ON 1.65 0.51 -120 1.65 0.51 -120
SHOW_FRIENDS_LIST
CHAT_CONFIRM npc QuestScript150_2
ARROWS_OFF
HIDE_FRIENDS_LIST
CHAT_CONFIRM npc QuestScript150_3
HIDE bFriendsList
CHAT_CONFIRM | |
def flist(self):
return list(self._files.keys())
def unlink(self, path):
"Unlink (delete) the given file."
path = cygwin2nt(path)
return os.unlink(path)
def rename(self, src, dst):
"Rename file from src to dst."
src = cygwin2nt(src)
dst = cygwin2nt(dst)
return os.rename(src, dst)
# directory methods
def mkdir(self, path, mode=0o777):
"Make a directory."
path = cygwin2nt(path)
return os.mkdir(path, mode)
def makedirs(self, path, mode=0o777):
"Make a full path."
path = cygwin2nt(path)
return os.makedirs(path, mode)
def chdir(self, path):
path = cygwin2nt(path)
return os.chdir(path)
def rmdir(self, path):
"Delete a directory."
path = cygwin2nt(path)
return os.rmdir(path)
def getcwd(self):
return os.getcwd()
def getcwdu(self):
return os.getcwd()
def pushd(self, path=None):
self._dirstack.append(os.getcwd())
if path:
path = cygwin2nt(path)
os.chdir(path)
def popd(self):
try:
path = self._dirstack.pop()
except IndexError:
return None
else:
os.chdir(path)
return path
def listdir(self, path):
path = cygwin2nt(path)
return os.listdir(path)
ls = listdir
def listfiles(self, path):
path = cygwin2nt(path)
isfile = os.path.isfile
pjoin = os.path.join
rv = []
for fname in os.listdir(path):
if isfile(pjoin(path, fname)):
rv.append(nt2cygwin(fname))
return rv
def chmod(self, path, mode):
path = cygwin2nt(path)
return os.chmod(path, mode)
def chown(self, path, uid, gid):
path = cygwin2nt(path)
return os.chown(path, uid, gid)
def stat(self, path):
path = cygwin2nt(path)
return os.stat(path)
def statvfs(self, path):
path = cygwin2nt(path)
return os.statvfs(path)
# fd ops return the file descriptor as the handle (of course)
def open(self, fname, flags, mode=0o777):
fd = os.open(fname, flags, mode)
return fd
def close(self, fd):
return os.close(fd)
def write(self, fd, data):
return os.write(fd, data)
def read(self, fd, n):
return os.read(fd, n)
# end fd ops
# shutil interface
def copyfile(self,src, dst):
return shutil.copyfile(src, dst)
def copymode(self, src, dst):
return shutil.copymode(src, dst)
def copystat(self, src, dst):
return shutil.copystat(src, dst)
def copy(self, src, dst):
return shutil.copy(src, dst)
def copy2(self, src, dst):
return shutil.copy2(src, dst)
def copytree(self, src, dst, symlinks=False):
return shutil.copytree(src, dst, symlinks)
def move(self, src, dst):
return win32file.MoveFile(str(src), str(dst))
def rmtree(self, path):
path = cygwin2nt(path)
for fname in os.listdir(path):
file_or_dir = os.path.join(path, fname)
if os.path.isdir(file_or_dir) and not os.path.islink(file_or_dir):
self.rmtree(file_or_dir) # it's a directory, recursive call to this function again
else:
try:
os.remove(file_or_dir) #it's a file, delete it
except:
#probably failed because it is not a normal file
win32api.SetFileAttributes(file_or_dir, win32file.FILE_ATTRIBUTE_NORMAL)
os.remove(file_or_dir) #it's a file, delete it
os.rmdir(path) #delete the directory here
# os.path delegates
def exists(self, path):
path = cygwin2nt(path)
return os.path.exists(path)
def isabs(self, path):
path = cygwin2nt(path)
return os.path.isabs(path)
def isdir(self, path):
path = cygwin2nt(path)
return os.path.isdir(path)
def isfile(self, path):
path = cygwin2nt(path)
return os.path.isfile(path)
def islink(self, path):
path = cygwin2nt(path)
return os.path.islink(path)
def ismount(self, path):
path = cygwin2nt(path)
return os.path.ismount(path)
# process control, these calls are synchronous (they block)
def system(self, cmd):
UserLog.msg("system", cmd)
return os.system(cmd) # remember, stdout is on the server
def run(self, cmd, user=None):
if user is None:
return self.pipe(cmd)
else:
return self.run_as(cmd, user.name, user.passwd)
def run_async(self, cmd, user=None):
UserLog.msg("run_async", cmd, str(user))
proc = WindowsProcess(cmd, pwent=user)
self._procs[proc.childpid] = proc
return proc.childpid
def _get_process(self, pid):
return self._procs.get(pid, None)
def read_process(self, pid, N=-1):
proc = self._get_process(pid)
if proc:
return proc.read(N)
else:
return ''
def write_process(self, pid, data):
proc = self._get_process(pid)
if proc:
return proc.write(data)
def poll(self, pid):
"""Poll for async process. Returns exitstatus if done."""
try:
proc = self._procs[pid]
except KeyError:
return -errno.ENOENT
if proc.poll() is None:
return -errno.EAGAIN
else:
del self._procs[pid]
return proc.exitstatus
def waitpid(self, pid):
while True:
rv = self.poll(pid)
if rv == -errno.ENOENT:
return None
if rv == -errno.EAGAIN:
proc = self._procs[pid]
es = proc.wait()
del self._procs[pid]
return es
else: # already exited
del self._procs[pid]
return rv
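# Usage sketch (illustrative, hypothetical command) for the async process
# protocol implemented by run_async()/poll()/waitpid() above:
#
#   pid = server.run_async("ping -n 5 localhost")
#   status = server.waitpid(pid)   # blocks until the process exits
# or, non-blocking:
#   rv = server.poll(pid)
#   if rv == -errno.EAGAIN:
#       pass  # still running, try again later
#   else:
#       pass  # rv is the exit status; the process has been reaped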
def kill(self, pid):
"""Kills a process that was started by run_async."""
try:
proc = self._procs.pop(pid)
except KeyError:
return -errno.ENOENT
else:
proc.kill()
sts = proc.wait()
return sts
def killall(self):
rv = []
for pid in self._procs:
rv.append(self.kill(pid))
return rv
def plist(self):
return list(self._procs.keys())
def spawn(self, cmd, user=None, async=True):
# keep the "async" parameter for compatibility with the
# PosixServer.
if user:
cmd = ("runas /user:%s " % user) + cmd
UserLog.msg("spawn", cmd)
L = split_command_line(cmd)
pid = os.spawnv(os.P_DETACH, L[0], L)
return pid
def pipe(self, cmd):
UserLog.msg("pipe", cmd)
proc = os.popen(cmd, 'r')
text = proc.read()
sts = proc.close()
if sts is None:
sts = 0
return ExitStatus(cmd, sts), text
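# Usage sketch (illustrative, hypothetical command): pipe() returns an
# (ExitStatus, output-text) pair.
#   status, out = server.pipe("ipconfig /all")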
def python(self, snippet):
try:
code = compile(str(snippet) + '\n', '<WindowsServer>', 'eval')
rv = eval(code, globals(), vars(self))
except:
t, v, tb = sys.exc_info()
return '*** %s (%s)' % (t, v)
else:
return rv
def pyexec(self, snippet):
try:
code = compile(str(snippet) + '\n', '<WindowsServer>', 'exec')
exec(code, globals(), vars(self))
except:
t, v, tb = sys.exc_info()
return '*** %s (%s)' % (t, v)
else:
return
# method that exists just to check if everything is working
def alive(self):
return True
def suicide(self):
"Kill myself. The server manager will ressurect me. How nice."
global _EXIT
_EXIT = True
def clean(self):
self.chdir("C:\\tmp")
for f in self.flist():
try:
self.fclose(f)
except:
pass
for pid in self.plist():
try:
self.kill(pid)
except:
pass
def NetUseAdd(self, drive, share, username=None, domainname=None, password=<PASSWORD>):
"""Calls Windows API to map a drive. Note that this does not automatically use DFS."""
ui2={}
ui2['local'] = "%s:" % drive[0].upper()
ui2['remote'] = str(share) # \\servername\sharename
ui2['asg_type'] = USE_DISKDEV
if username:
ui2['username'] = str(username)
if domainname:
ui2['domainname'] = str(domainname)
if password:
ui2['password'] = str(password)
return win32net.NetUseAdd(None,2,ui2)
def NetUseDelete(self, drive, forcelevel=0):
"""Remove a mapped drive."""
ui2 = win32net.NetUseGetInfo(None, "%s:" % drive[0].upper(), 2)
return win32net.NetUseDel(None, ui2['remote'], max(0, min(forcelevel, 3)))
#win32net.USE_NOFORCE
#win32net.USE_FORCE
#win32net.USE_LOTS_OF_FORCE
def net_use(self, drive, share, user=None, domainname=None, password=None):
"""Map a drive on a Windows client using the *net* command."""
cmd = "net use %s: %s %s" % (drive[0].upper(), share, IF(password, password, ""))
if user:
cmd += " /USER:%s%s" % (IF(domainname, "%s\\"%domainname, ""), user)
return self.pipe(cmd)
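# For example (hypothetical values),
#   net_use("Z", r"\\fileserver\share", user="tester", domainname="LAB", password="secret")
# ends up running:
#   net use Z: \\fileserver\share secret /USER:LAB\tester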
def net_use_delete(self, drive):
"""Unmap a drive on a Windows client using the *net* command."""
cmd = "net use %s: /delete /y" % (drive[0].upper(),)
return self.pipe(cmd)
def md5sums(self, path):
"""Reads the md5sums.txt file in path and returns the number of files
checked good, then number bad (failures), and a list of the failures."""
from pycopia import md5lib
failures = []
counter = Counter()
md5lib.check_md5sums(path, failures.append, counter)
return counter.good, counter.bad, failures
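# Usage sketch (hypothetical path): md5sums() returns counts plus the list of
# files that failed verification.
#   good, bad, failures = server.md5sums(r"C:\tmp\build")
#   assert bad == 0, failures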
def _get_home(self):
try: # F&*#!&@ windows
HOME = os.environ['USERPROFILE']
except KeyError:
try:
HOME = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"])
except KeyError:
HOME = "C:\\"
return HOME
def get_tarball(self, url):
self.pushd(self._get_home())
# fetch the tarball quietly with wget (unlike ncftpget, this does not check whether the file is already current)
exitstatus, out = self.pipe('wget -q "%s"' % (url,))
self.popd()
return exitstatus
def run_script(self, script):
"""Runs a script from a shell."""
name = os.path.join("c:\\", "tmp", "clnt%d.bat" % (os.getpid(),))
sfile = open(name, "w")
sfile.write(str(script))
sfile.write("\n") # just in case string has no newline at the end
sfile.close()
try:
sts, out = self.pipe(name)
finally:
os.unlink(name)
return ExitStatus("cmd.exe", sts), out
# for PosixServer duck typing
def mount(self, host, export, mountpoint):
"""Map a drive on a client. Same as mount on NFS. The mountpoint should
be a drive letter (without the colon). """
return self.net_use(mountpoint, r"\\%s\%s" % (host, export))
def umount(self, mountpoint):
"""Unmap a drive on a client."""
return self.net_use_delete(mountpoint)
def run_as(self, cmd, user, password):
cmd = 'runas /user:%s %s' % (user, cmd)
return self.pipe(cmd)
def get_short_pathname(self, path):
"""Get the short file name of path."""
path = cygwin2nt(path)
return win32api.GetShortPathName(path)
def win32(self, funcname, *args, **kwargs):
"""Generic interface to win32. Calls a win32api function by name."""
f = getattr(win32api, funcname)
return f(*args, **kwargs)
def hostname(self):
"""Returns the client hosts name."""
return win32api.GetComputerName()
# Windows file API interface
def CopyFile(self, src, dst):
src = cygwin2nt(src)
dst = cygwin2nt(dst)
return win32file.CopyFile(src, dst, 1)
def GetFileAttributes(self, name):
name = cygwin2nt(name)
return win32file.GetFileAttributes(name)
def GetFileAttributeFlags(self):
return {
"ARCHIVE":win32file.FILE_ATTRIBUTE_ARCHIVE,
"COMPRESSED":win32file.FILE_ATTRIBUTE_COMPRESSED,
"DIRECTORY":win32file.FILE_ATTRIBUTE_DIRECTORY,
"HIDDEN":win32file.FILE_ATTRIBUTE_HIDDEN,
"NORMAL":win32file.FILE_ATTRIBUTE_NORMAL,
"OFFLINE":win32file.FILE_ATTRIBUTE_OFFLINE,
"READONLY":win32file.FILE_ATTRIBUTE_READONLY,
"SYSTEM":win32file.FILE_ATTRIBUTE_SYSTEM,
"TEMPORARY":win32file.FILE_ATTRIBUTE_TEMPORARY,
}
def SetFileAttributes(self, name, flags):
name = cygwin2nt(name)
return win32file.SetFileAttributes(name, flags)
def add_share(self, pathname):
"""Create a new share on this server. A directory is also created. """
drive, sharename = os.path.split(pathname)
if not os.path.isdir(pathname):
os.mkdir(pathname)
shinfo={} # shinfo struct
shinfo['netname'] = sharename
shinfo['type'] = win32netcon.STYPE_DISKTREE
shinfo['remark'] = 'Testing share %s' % (sharename,)
shinfo['permissions'] = 0
shinfo['max_uses'] = -1
shinfo['current_uses'] = 0
shinfo['path'] = pathname
shinfo['passwd'] = ''
win32net.NetShareAdd(None,2,shinfo)
return sharename
def del_share(self, | |
# accept a few camel-cased familyname exceptions,
# so this one should also be fine:
ttFont = TTFont(TEST_FILE("abeezee/ABeeZee-Regular.ttf"))
assert_PASS(check(ttFont),
"with a good camel-cased fontname...")
def NOT_IMPLEMENTED_test_check_name_postscriptname():
""" Check name table: POSTSCRIPT_NAME entries. """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/name/postscriptname")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "bad-entry"
# - PASS
def test_check_name_typographicfamilyname():
""" Check name table: TYPOGRAPHIC_FAMILY_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/typographicfamilyname")
# RIBBI fonts must not have a TYPOGRAPHIC_FAMILY_NAME entry
ttFont = TTFont(TEST_FILE("montserrat/Montserrat-BoldItalic.ttf"))
assert_PASS(check(ttFont),
f"with a RIBBI without nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...")
# so we add one and make sure the check reports the problem:
ttFont['name'].names[5].nameID = NameID.TYPOGRAPHIC_FAMILY_NAME # 5 is arbitrary here
assert_results_contain(check(ttFont),
FAIL, 'ribbi',
f'with a RIBBI that has got a nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...')
# non-RIBBI fonts must have a TYPOGRAPHIC_FAMILY_NAME entry
ttFont = TTFont(TEST_FILE("montserrat/Montserrat-ExtraLight.ttf"))
assert_PASS(check(ttFont),
f"with a non-RIBBI containing a nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...")
# set bad values on all TYPOGRAPHIC_FAMILY_NAME entries:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
ttFont['name'].names[i].string = "foo".encode(name.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'non-ribbi-bad-value',
f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entries...')
# remove all TYPOGRAPHIC_FAMILY_NAME entries
# by changing their nameid to something else:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
ttFont['name'].names[i].nameID = 255 # blah! :-)
assert_results_contain(check(ttFont),
FAIL, 'non-ribbi-lacks-entry',
f'with a non-RIBBI lacking a nameid={NameID.TYPOGRAPHIC_FAMILY_NAME} entry...')
def test_check_name_typographicsubfamilyname():
""" Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/typographicsubfamilyname")
RIBBI = "montserrat/Montserrat-BoldItalic.ttf"
NON_RIBBI = "montserrat/Montserrat-ExtraLight.ttf"
# Add incorrect TYPOGRAPHIC_SUBFAMILY_NAME entries to a RIBBI font
ttFont = TTFont(TEST_FILE(RIBBI))
ttFont['name'].setName("FOO",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
ttFont['name'].setName("BAR",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
assert_results_contain(check(ttFont),
FAIL, 'mismatch',
f'with a RIBBI that has got incorrect'
f' nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
assert_results_contain(check(ttFont),
FAIL, 'bad-win-name')
assert_results_contain(check(ttFont),
FAIL, 'bad-mac-name')
# non-RIBBI fonts must have a TYPOGRAPHIC_SUBFAMILY_NAME entry
ttFont = TTFont(TEST_FILE(NON_RIBBI))
assert_PASS(check(ttFont),
f'with a non-RIBBI containing a nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entry...')
# set bad values on the win TYPOGRAPHIC_SUBFAMILY_NAME entry:
ttFont = TTFont(TEST_FILE(NON_RIBBI))
ttFont['name'].setName("Generic subfamily name",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
assert_results_contain(check(ttFont),
FAIL, 'bad-typo-win',
f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
# set bad values on the mac TYPOGRAPHIC_SUBFAMILY_NAME entry:
ttFont = TTFont(TEST_FILE(NON_RIBBI))
ttFont['name'].setName("Generic subfamily name",
NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
assert_results_contain(check(ttFont),
FAIL, 'bad-typo-mac',
f'with a non-RIBBI with bad nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entries...')
# remove all TYPOGRAPHIC_SUBFAMILY_NAME entries
ttFont = TTFont(TEST_FILE(NON_RIBBI))
win_name = ttFont['name'].getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP,
WindowsLanguageID.ENGLISH_USA)
mac_name = ttFont['name'].getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
PlatformID.MACINTOSH,
MacintoshEncodingID.ROMAN,
MacintoshLanguageID.ENGLISH)
win_name.nameID = 254
if mac_name:
mac_name.nameID = 255
assert_results_contain(check(ttFont),
FAIL, 'missing-typo-win',
f'with a non-RIBBI lacking a nameid={NameID.TYPOGRAPHIC_SUBFAMILY_NAME} entry...')
# note: the check must not complain
# about the lack of a mac entry!
def test_check_name_copyright_length():
""" Length of copyright notice must not exceed 500 characters. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/copyright_length")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
good_entry = 'a' * 499
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = good_entry.encode(entry.getEncoding())
assert_PASS(check(ttFont),
'with 499-byte copyright notice string...')
good_entry = 'a' * 500
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = good_entry.encode(entry.getEncoding())
assert_PASS(check(ttFont),
'with 500-byte copyright notice string...')
bad_entry = 'a' * 501
for i, entry in enumerate(ttFont['name'].names):
if entry.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = bad_entry.encode(entry.getEncoding())
assert_results_contain(check(ttFont),
FAIL, 'too-long',
'with 501-byte copyright notice string...')
# TODO: Maybe skip this code-test if the service is offline?
# We could use pytest.mark.skipif here together with a piece of code that
# verifies whether or not the namecheck.fontdata.com website is online at the moment
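# A minimal sketch of that idea (hypothetical helper, not wired into the test
# below; assumes the `requests` package is available):
#
#   import requests
#
#   def _namecheck_is_online(url="http://namecheck.fontdata.com"):
#       try:
#           return requests.get(url, timeout=5).status_code == 200
#       except requests.RequestException:
#           return False
#
#   @pytest.mark.skipif(not _namecheck_is_online(),
#                       reason="namecheck.fontdata.com is unreachable")
#   def test_check_fontdata_namecheck():
#       ...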
def test_check_fontdata_namecheck():
""" Familyname is unique according to namecheck.fontdata.com """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fontdata_namecheck")
TIMEOUT_MSG = ("Sometimes namecheck.fontdata.com times out"
" and we don't want to stop running all the other"
" code tests. Unless you touched this portion of"
" the code, it is generaly safe to ignore this glitch.")
# We don't FAIL because this is meant as a merely informative check
# There may be frequent cases when fonts are being updated and thus
# already have a public family name registered on the
# namecheck.fontdata.com database.
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_results_contain(check(font),
INFO, 'name-collision',
'with an already used name...',
ignore_error=TIMEOUT_MSG)
# Here we know that FamilySans has not been (and will not be)
# registered as a real family.
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
assert_PASS(check(font),
'with a unique family name...',
ignore_error=TIMEOUT_MSG)
def test_check_fontv():
""" Check for font-v versioning """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fontv")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
assert_results_contain(check(ttFont),
INFO, 'bad-format',
'with a font that does not follow'
' the suggested font-v versioning scheme ...')
from fontv.libfv import FontVersion
fv = FontVersion(ttFont)
fv.set_state_git_commit_sha1(development=True)
version_string = fv.get_name_id5_version_string()
for record in ttFont['name'].names:
if record.nameID == NameID.VERSION_STRING:
record.string = version_string
assert_PASS(check(ttFont),
'with one that follows the suggested scheme ...')
def test_check_glyf_nested_components():
"""Check glyphs do not have nested components."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/glyf_nested_components")
ttFont = TTFont(TEST_FILE("nunito/Nunito-Regular.ttf"))
assert_PASS(check(ttFont))
# We need to create a nested component. "second" has components, so setting
# one of "quotedbl"'s components to "second" should do it.
ttFont['glyf']['quotedbl'].components[0].glyphName = "second"
assert_results_contain(check(ttFont),
FAIL, 'found-nested-components')
# Temporarily disabling this code-test since check/negative_advance_width itself
# is disabled waiting for an implementation targeting the
# actual root cause of the issue.
#
# See also comments at googlefonts.py as well as at
# https://github.com/googlefonts/fontbakery/issues/1727
def disabled_test_check_negative_advance_width():
""" Check that advance widths cannot be inferred as negative. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/negative_advance_width")
# Our reference Cabin Regular is good
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# So it must PASS
assert_PASS(check(ttFont),
'with a good font...')
# We then change values in an arbitrary glyph
# in the glyf table in order to cause the problem:
glyphName = "J"
coords = ttFont["glyf"].glyphs[glyphName].coordinates
# FIXME:
# Note: I thought this was the proper way to induce the
# issue, but now I think I'll need to look more
# carefully at sample files provided by MarcFoley
# to see what's really at play here and how the relevant
# data is encoded into the affected OpenType files.
rightSideX = coords[-3][0]
# leftSideX: (make right minus left a negative number)
coords[-4][0] = rightSideX + 1
ttFont["glyf"].glyphs[glyphName].coordinates = coords
# and now this should FAIL:
assert_results_contain(check(ttFont),
FAIL, 'bad-coordinates',
'with bad coordinates on the glyf table...')
def test_check_varfont_generate_static():
""" Check a static ttf can be generated from a variable font. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont/generate_static")
ttFont = TTFont(TEST_FILE("cabinvfbeta/CabinVFBeta.ttf"))
assert_PASS(check(ttFont))
# Removing a table to deliberately break variable font
del ttFont['fvar']
assert_results_contain(check(ttFont),
FAIL, 'varlib-mutator')
def test_check_varfont_has_HVAR():
""" Check that variable fonts have an HVAR table. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/varfont/has_HVAR")
# Our reference Cabin Variable Font contains an HVAR table.
ttFont = TTFont(TEST_FILE("cabinvfbeta/CabinVFBeta.ttf"))
# So the check must PASS.
assert_PASS(check(ttFont))
# Introduce the problem by removing the HVAR table:
del ttFont['HVAR']
assert_results_contain(check(ttFont),
FAIL, 'lacks-HVAR')
def test_check_smart_dropout():
""" Font enables smart dropout control in "prep" table instructions? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/smart_dropout")
ttFont = TTFont(TEST_FILE("nunito/Nunito-Regular.ttf"))
# "Program at 'prep' table contains
# instructions enabling smart dropout control."
assert_PASS(check(ttFont))
# "Font does not contain TrueType instructions enabling
# smart dropout control in the 'prep' table program."
import array
ttFont["prep"].program.bytecode = array.array('B', [0])
assert_results_contain(check(ttFont),
FAIL, 'lacks-smart-dropout')
def test_check_vttclean():
""" There must not be VTT Talk sources in the font. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/vttclean")
good_font = TEST_FILE("mada/Mada-Regular.ttf")
assert_PASS(check(good_font))
bad_font = TEST_FILE("hinting/Roboto-VF.ttf")
assert_results_contain(check(bad_font),
FAIL, 'has-vtt-sources')
def test_check_aat():
""" Are there unwanted Apple tables ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/aat")
unwanted_tables = [
'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc',
'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just',
'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop',
'trak', 'xref'
]
# Our reference Mada Regular font is good here:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# We now add unwanted tables one-by-one to validate the FAIL code-path:
for unwanted in unwanted_tables:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
ttFont.reader.tables[unwanted] = "foo"
assert_results_contain(check(ttFont),
FAIL, 'has-unwanted-tables',
f'with unwanted table {unwanted} ...')
def test_check_fvar_name_entries():
""" All name entries referenced by fvar instances exist on the name table? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/fvar_name_entries")
# This broken version of the Expletus variable font was where this kind of problem was first observed:
ttFont = TTFont(TEST_FILE("broken_expletus_vf/ExpletusSansBeta-VF.ttf"))
# So it must FAIL the check:
assert_results_contain(check(ttFont),
FAIL, 'missing-name',
'with a bad font...')
# If we add the name entry with id=265 (which was the one missing)
# then the check must now PASS:
from fontTools.ttLib.tables._n_a_m_e import makeName
ttFont["name"].names.append(makeName("Foo", 265, 1, 0, 0))
assert_PASS(check(ttFont),
'with a good font...')
def
"""Jahnke, Student ID: 0808831
<EMAIL> / <EMAIL>
CSCI 160, Spring 2022, Lecture Sect 02, Lab Sect L03
Program 12, Part 2
Copyright (C) 2022 <NAME>
Assignment:
1. Write the required functions.
#. Prompt user for text file name containing menu data.
#. Display/manipulate the data as prescribed by required functions.
.. note::
Assignment instructions declare "avergePrice (theDictionary)". The
decision was made to correct the spelling error and, in this program,
write the function name as "averagePrice".
.. note::
DATA FORMAT - Menu items (keys): uppercase strings; prices: floats
(within dicts) or string depictions of floats (if in a file).
Deviation from this format may produce undesirable results.
.. note::
Function HEADERS are written exactly as specified per assignment
instructions, i.e., written in camelCase with whitespace between the
function's name and its parameters. camelCase persists for
declaration of variables for consistency. When a function is called,
the absence of whitespace between a function's name and its
arguments was judged to have no effect on performance; thus, such
whitespace is excluded outside of function headers.
Required Functions:
readMenuItems (fileName)
- Fills a dict with menu items and price data from fileName.
totalMenuItems (theDictionary)
- Calculates the total number of items available on menu
getMenuItems (theDictionary):
- Identify what items are on the menu.
getMenuItemsWithinRange (theDictionary, lowerLimit, upperLimit):
- Identifies menu items that are in specified price range.
addMenuItem(theDictionary, item, price):
- Adds an item to the menu.
updateMenuItem (theDictionary, item, price):
- Update the price of item.
getMenuItemPrice (theDictionary, item)
- Identifies the price of item.
averagePrice (theDictionary)
- Identifies the average price of items on the menu.
takeOrder (theDictionary)
- Create a mock order for a number of items from the menu.
printMenu (theDictionary)
- Prints a table of menu items and prices.
Discretionary Functions:
promptTextFileRead ()
- Prompts for desired text file name for use in read mode.
textFileToDict (fileName, kType="s", vType="s", delimiter=":")
- Reads fileName and converts its data into a dict.
"""
from os.path import isfile
from random import randint
from statistics import mean
# discretionary
def promptTextFileRead ():
"""Prompts for desired text file name for use in read mode.
:return: The name of a text file if it exists, else None
:rtype: str or None
"""
while True:
fileName = input("Text file name: ")
if fileName == "":
break
elif fileName[-4:] != ".txt":
print("File name must include \".txt\" extension.")
else:
if isfile(fileName):
return fileName
else:
print(fileName, "does not exist.\nInput another text "
"file name or press \"Enter\" to exit.")
# discretionary
def textFileToDict (fileName, kType="s", vType="s", delimiter=":"):
"""Reads fileName and converts its data into a dict.
Correct line data format: "KEY[delimiter]VALUE".
:param str fileName:
The text file to pull data from.
:param str kType:
The target data type of keys assigned to the dict: "s" for str;
"i" for int; "f" for float.
:param str vType:
The target data type of values assigned to the dict: "s" for
str; "i" for int; "f" for float.
:param str delimiter:
The character separating key data from value data.
:return:
A dictionary containing desired data.
:rtype: dict or None
:except ValueError:
Will return None if data is formatted incorrectly or data is
incapable of being converted to target kType/vType.
"""
dictionary = {}
validTypes = {"s": str, "i": int, "f": float}
if (
kType not in validTypes or
vType not in validTypes
):
return None
kType = validTypes[kType]
vType = validTypes[vType]
with open(fileName) as f:
for line in f:
# strips "\n"
line = line.strip()
try:
key, value = line.split(delimiter)
except ValueError:
return None
try:
if kType == int:
key = float(key)
key = kType(key)
else:
key = kType(key)
if vType == int:
value = float(value)
value = vType(value)
else:
value = vType(value)
except ValueError:
return None
dictionary[key] = value
return dictionary
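# A hedged illustration of textFileToDict: given a file "menu.txt" (the name
# is only an example) whose lines look like
#   BURGER:4.99
#   FRIES:2.49
# the call textFileToDict("menu.txt", kType="s", vType="f", delimiter=":")
# would return {"BURGER": 4.99, "FRIES": 2.49}; a malformed line, or a value
# that cannot be converted to the requested type, makes it return None.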
def readMenuItems (fileName):
"""Fills a dict with menu items and price data from fileName.
This function does no error checking, as the assignment instructions
outline. However, the helper it calls (textFileToDict) will return
None to this function if fileName is improperly formatted.
:param str fileName:
Text file containing lines of menu items followed by prices
separated by a tab character.
:return:
Dict with menu items as keys and prices as values.
:rtype: dict[str, float]
"""
dictionary = textFileToDict(fileName, "s", "f", "\t")
return dictionary
def totalMenuItems (theDictionary):
"""Calculates the total number of items available on menu
(theDictionary).
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:return:
Total number of items on the menu.
:rtype: int
"""
return len(theDictionary)
def getMenuItems (theDictionary):
"""Identify what items are on the menu.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:return:
A sorted list of menu items.
:rtype: list[str]
"""
items = sorted(list(theDictionary.keys()))
return items
def getMenuItemsWithinRange (theDictionary, lowerLimit, upperLimit):
"""Identifies menu items that are in specified price range.
Rounds lowerLimit and upperLimit to a valid dollar/cent value.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:param float or int lowerLimit:
The minimum price to check for menu items.
:param float or int upperLimit:
The maximum price to check for menu items.
:return:
The menu items whose prices are within the specified price
range.
:rtype: list[str]
"""
items = getMenuItems(theDictionary)
inRangeItems = []
for item in items:
if lowerLimit <= theDictionary[item] <= upperLimit:
inRangeItems.append(item)
return inRangeItems
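# e.g. (illustrative values only):
#   getMenuItemsWithinRange({"BURGER": 5.00, "FRIES": 2.50, "SODA": 1.75}, 2.00, 5.00)
# returns ['BURGER', 'FRIES'] -- alphabetical, because getMenuItems() sorts the keys.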
def addMenuItem(theDictionary, item, price):
"""Adds an item to the menu.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:param str item:
The name of the new item to be added to the menu.
:param float or int price:
The price of item.
:return:
True if item added to the menu or False if item was already on
the menu.
:rtype: bool
"""
item = item.upper()
price = round(float(price), 2)
if item in theDictionary:
return False
theDictionary[item] = price
return True
def updateMenuItem (theDictionary, item, price):
"""Update the price of item.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:param str item:
The item whose price is to be updated.
:param float or int price:
The updated price of item.
:return:
True if item exists on the menu, else False.
:rtype: bool
"""
item = item.upper()
price = round(float(price), 2)
if item in theDictionary:
theDictionary[item] = price
return True
return False
# if item not in theDictionary:
# return False
#
# theDictionary[item] = price
#
# return True
def getMenuItemPrice (theDictionary, item):
"""Identifies the price of item.
Assumes items are written in uppercase.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:param str item:
The name of item to check the price of.
:return:
The price of item if in theDictionary, else None.
:rtype: float or None
"""
item = item.upper()
if item in theDictionary:
return theDictionary[item]
def averagePrice (theDictionary):
"""Identifies the average price of items on the menu.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:return:
The average of all prices rounded to 2 decimal places.
:rtype: float
"""
prices = list(theDictionary.values())
average = round(mean(prices), 2)
return average
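# e.g. averagePrice({"BURGER": 5.00, "FRIES": 2.50}) returns 3.75
# (the mean of 5.00 and 2.50, rounded to two decimal places).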
def takeOrder (theDictionary):
"""Create a mock order for a number of items from the menu.
:param dict[str, float] theDictionary:
Dict containing menu items as keys and respective prices as
values.
:except ValueError:
Quantities of items should be input as ints.
:rtype:
"""
# CAO 20220429
btcRate = 0.000026
orderItems = {}
total = 0.0
menuItems = list(theDictionary.keys())
print("Welcome to Burger Hut! Here is our menu:")
printMenu(theDictionary)
print("\nEnter desired menu items when prompted."
"Press \"Enter\" at the prompt to complete your order.")
while True:
if len(orderItems) == 0:
order = input("What can I get started for you today? "
"").upper().strip()
else:
order = input("What else can I get for you? ").upper().strip()
if order == "":
break
elif order not in theDictionary:
comebacks = [
f"{order} was a limited | |
"""
FILENAME: controller.py
controller.py is the client and SUMO is the server
"""
""" DIRECTORIES & PATHS """
PORT = 8813
""" LIBRARIES """
import os
import sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
import subprocess
import random
import math
import pandas as pd
import numpy as np
from numpy import random
import numpy.matlib
import matplotlib.pyplot as plt
import ggplot as gg
import xml.etree.ElementTree as ET
from xml.dom import minidom
import heapq
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import arrivalRateGen
import getDiscreteStates
""" PARAMETERS """
secondsInHour = 60 * 60
hoursInDay = 24
secondsInDay = hoursInDay * secondsInHour
SL = "65546898" # ID of stoplight
# discretization parameters
numPhasesForAction = 4 # 8 including the yellow phases
actionPhases = [0,2,4,6] # actions are taken on the even phases; the odd phases (1,3,5,7) are the yellow phases
numEdges = 4
numLanes = 8
minPhaseTime = 4
maxPhaseTime = 36
yellowPhaseTime = 4
actionsForStraightPhase = [10, 14, 18, 22, 26, 30, 34, 38] # number of seconds to run an actionPhase
actionsForLeftPhase = [3,4,5,6,7,8,9,10]
global numActions
numActions = len(actionsForStraightPhase)
numActionsForStraightPhase = len(actionsForStraightPhase) # 8 possible durations
numActionsForLeftPhase = len(actionsForLeftPhase)
""" STATE DISCRETIZATION / INITIAL LEARNING """
getDiscreteStates.learnDiscretization(3)
getDiscreteStates.plotClusterHistograms()
dictClusterObjects = getDiscreteStates.getDictClusterObjects() # IN: hour (int), phase (int); OUT: stateSubID
mapDiscreteStates = getDiscreteStates.getMapDiscreteStates() # IN, hour (int), phase (int), stateSubID (int); OUT: stateID
invMapDicreteStates = getDiscreteStates.getInvMapDiscreteStates()
numClustersTracker = getDiscreteStates.getNumClustersTracker()
numStates = 0
for h in range(hoursInDay):
for a in actionPhases:
numStates += numClustersTracker[h][a]
#print 'stateCounter = ', numStates
global baselineMean, baselineMedian, baselineMin
baselineMean = getDiscreteStates.getBaselineMean()
#print 'baselineMean = ', baselineMean
baselineMedian = getDiscreteStates.getBaselineMedian()
#print 'baselineMedian = ', baselineMedian
baselineMin = getDiscreteStates.getBaselineMin()
#print 'baselineMin = ', baselineMin
""" COLLECTIONS """
global QValues, QCounts, QProbs, QAlphas
QValues = np.zeros((numStates,numActions)) # all state action pairs
QCounts = np.zeros((numStates, numActions)) # some of these will always be null
QProbs = np.ones((numStates,numActions)) / numActions # given state s, what is the probability that I should take the next action
# TODO - we need to apply some priors here by preventing certain actions from
# being taken
QAlphas = np.ones((numStates, numActions))
# same thing here;
# two lanes for each edge
listLanes = ['8949170_0', '8949170_1', \
'-164126513_0', '-164126513_1',\
'52016249_0', '52016249_1',\
'-164126511_0', '-164126511_1']
listEdges = ['8949170', '-164126513', '52016249', '-164126511']
tupEdges = ('8949170', '-164126513', '52016249', '-164126511')
# (south (palm), north (palm), west (arboretum), east (arboretum))
laneQueueTracker = {}
laneWaitingTracker = {}
laneNumVehiclesTracker = {}
for lane in listLanes:
laneQueueTracker[lane] = 0
laneWaitingTracker[lane] = 0
laneNumVehiclesTracker[lane] = 0
queueTracker = {}
waitingTracker = {}
numVehiclesTracker = {}
for edge in listEdges:
queueTracker[edge] = 0
waitingTracker[edge] = 0
numVehiclesTracker[edge] = 0
stateCols = ('phase', '8949170_q', '8949170_w', '-164126513_q', '-164126513_w',\
'52016249_q', '52016249_w', '-164126511_q','-164126511_w')
""" HELPER FUNCTIONS """
def computeObjValue(queueTracker, waitingTracker):
currObjValue = 0
for key in listEdges:
currObjValue -= ((1 * queueTracker[key]) ** 1.75 + (2 * waitingTracker[key]) ** 1.75)  # queue length and (doubled) waiting time both enter super-linearly (exponent 1.75)
return currObjValue
# TODO - these are the values for beta and theta that we need to select for the
# objective function
# plot this on the 3D plot and see if it makes sense to the decision maker
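# Worked example of the reward shape (illustrative numbers): an edge with a
# queue of 3 vehicles and 1.5 minutes of cumulative waiting contributes
# -(3 ** 1.75 + (2 * 1.5) ** 1.75) = -(3 ** 1.75 + 3 ** 1.75) ~= -13.7,
# so both congestion terms grow super-linearly and larger queues/waits are
# penalised disproportionately.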
def getStateID(currHod, phase, queueTracker, numVehiclesTracker, waitingTracker):
stateData = []
for edge in listEdges:
stateData.append(queueTracker[edge])
for edge in listEdges:
stateData.append(numVehiclesTracker[edge])
for edge in listEdges:
stateData.append(waitingTracker[edge])
stateData = np.array(stateData)
# #print 'stateData = ', stateData
stateSubID = int(dictClusterObjects[currHod][phase].predict(stateData))
# #print 'subStateID = ', stateSubID
stateID = mapDiscreteStates[currHod][phase][stateSubID]
# #print 'stateID = ', stateID
return stateID
# Q-value update
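# updateQValues below implements the standard tabular Q-learning rule
#   Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a'))
# with a per-pair learning rate alpha = 1 / N(s, a) that decays as the
# state-action pair is visited more often (QCounts tracks N).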
def updateQValues(lastStateID, lastAction, currStateID, reward):
QValues[lastStateID, lastAction] = (1 - QAlphas[lastStateID, lastAction]) * QValues[lastStateID, lastAction] + QAlphas[lastStateID, lastAction] * (reward + gamma * max(QValues[currStateID,]))
QCounts[lastStateID, lastAction] += 1
# #print 'QAlphas[lastStateID, lastAction] before = ', QAlphas[lastStateID,
# lastAction]
QAlphas[lastStateID, lastAction] = 1 / (QCounts[lastStateID, lastAction])
# #print 'QAlphas[lastStateID, lastAction] after = ', QAlphas[lastStateID,
# lastAction]
def updateQProbs(lastStateID, lastAction):
# #print 'np.sum(QCounts[lastStateID,]) = ', np.sum(QCounts[lastStateID,])
# #print 'np.sum(QCounts[lastStateID,]) = ', np.sum(QCounts[lastStateID,])
# #print 'np.sum(QValues[lastStateID,]) = ', np.sum(QValues[lastStateID,])
if np.sum(QCounts[lastStateID,]) == 0 or np.sum(QValues[lastStateID,]) == 0:
tau = 1
else:
# #print '(-(np.mean(QValues[lastStateID,]))) = ',
# (-(np.mean(QValues[lastStateID,])))
# #print '(np.mean(QCounts[lastStateID,])) = ',
# (np.mean(QCounts[lastStateID,]))
tau = (-(np.mean(QValues[lastStateID,]))) / (np.mean(QCounts[lastStateID,]))
# #print 'tau = ', tau
numerator = np.exp(QValues[lastStateID,] / tau)
tempSum = np.sum(numerator)
denominator = np.array([tempSum, tempSum, tempSum, tempSum, tempSum, tempSum, tempSum, tempSum])
QProbs[lastStateID,] = np.divide(numerator, denominator)
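# Illustration of the Boltzmann (softmax) update above, with made-up numbers:
# for two Q-values [-10, -20] and temperature tau = 5, the weights are
# exp(-2) ~= 0.135 and exp(-4) ~= 0.018, which normalise to roughly
# [0.88, 0.12] -- better-valued actions get picked more often, and a larger
# tau flattens the distribution (more exploration). The real table uses all
# 8 actions, but the mechanics are the same.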
# initial dataframes which will be able to store performance data over
# different days
dfObjValsMaster = pd.DataFrame()
dfObjValsSummaryMaster = pd.DataFrame()
dfQueueTrackerMaster = pd.DataFrame()
dfWaitingTrackerMaster = pd.DataFrame()
# check to see what the actions are
dfActions = pd.DataFrame()
""" SIMULATION """
currSod = 0
currPhaseID = 0
secsThisPhase = 0
# state objects and boolean helpers
phaseNum = 0
lastObjValue = 0
lastAction = 0
arrivalTracker = 0
waitingTime = 0
currStateID = 0
lastStateID = 0
dynamic = 1
totalDays = 60
# learning rates and discount factors
gamma = 0.95 # to do - drop gamma down a little bit?
for day in range(totalDays):
# generate the random route schedule for the day
arrivalRateGen.writeRoutes(day + 1)
sumoProcess = subprocess.Popen(['sumo-gui.exe', "-c", "palm.sumocfg", \
"--remote-port", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)
# # sumoProcess = subprocess.Popen(['sumo.exe', "-c", "palm.sumocfg",
# "--fcd-output", \
# # "out.fcd.xml", "--tripinfo-output", "out.trip.xml", "--summary",
# "out.summary.xml", "--queue-output", "out.queue.xml", "--remote-port",
# str(PORT)], stdout=sys.stdout, stderr=sys.stderr)
traci.init(PORT)
dfObjVals = pd.DataFrame()
dfObjValsMasterSummary = pd.DataFrame()
dfQueueTracker = pd.DataFrame()
dfWaitingTracker = pd.DataFrame()
dfNumVehiclesTracker = pd.DataFrame()
dfActions = pd.DataFrame()
lastAction = 0
currHod = 0
currSod = 0
while currSod < secondsInDay:
# update currHod
if currHod != currSod // secondsInHour:
currHod = int(currSod / secondsInHour)
#print 'training day = ', day
#print 'currHod = ', currHod
# DETERMINE IF ITS TIME TO MAKE A DECISION
if currPhaseID == int(traci.trafficlights.getPhase(SL)) and currSod != 0: # if phase HAS NOT changed
secsThisPhase += 1 # increase the seconds in the currentPhase
else: # IF THE PHASE HAS CHANGED
secsThisPhase = 0
currPhaseID = int(traci.trafficlights.getPhase(SL))
# just came out of yellow and about to start our new phase;
# we need to collect our reward from the last decision
# and make a new decision about how long we want this phase to be
if currPhaseID % 2 == 0 and secsThisPhase == 0: # only collecting data when we come to the end of a yellow phase
# update our trackers so we know what state the environment is in
# TODO - later come back and have the updates detect the state only according to what we think the environment is; we would also have to update the getDiscreteStates.py file
#================= COUNT HALTED VEHICLES (I.E. QUEUE SIZE) (4 elements)
for lane in listLanes:
laneQueueTracker[lane] = traci.lane.getLastStepHaltingNumber(str(lane))
for edge in queueTracker.keys():
queueTracker[edge] = laneQueueTracker[str(edge) + '_' + str(0)] + laneQueueTracker[str(edge) + '_' + str(1)]
# TOTAL CARS IN QUEUE; DOESN'T REALLY COMPUTE THE LENGTH
df = pd.DataFrame([[currSod, queueTracker['8949170'], queueTracker['-164126513'], queueTracker['52016249'], queueTracker['-164126511']]])
dfQueueTracker = dfQueueTracker.append(df, ignore_index = True)
# ================ count vehicles in lane
for lane in listLanes:
laneNumVehiclesTracker[lane] = traci.lane.getLastStepVehicleNumber(str(lane))
for edge in numVehiclesTracker.keys():
numVehiclesTracker[edge] = laneNumVehiclesTracker[str(edge) + '_' + str(0)] + laneNumVehiclesTracker[str(edge) + '_' + str(1)]
df = pd.DataFrame([[currSod, numVehiclesTracker['8949170'], numVehiclesTracker['-164126513'], numVehiclesTracker['52016249'], numVehiclesTracker['-164126511']]])
dfNumVehiclesTracker = dfNumVehiclesTracker.append(df, ignore_index = True)
# ================ cum waiting time in minutes
for lane in listLanes:
laneWaitingTracker[lane] = traci.lane.getWaitingTime(str(lane)) / 60
for edge in waitingTracker.keys():
waitingTracker[edge] = laneWaitingTracker[str(edge) + '_' + str(0)] + laneWaitingTracker[str(edge) + '_' + str(1)]
df = pd.DataFrame([[currSod, waitingTracker['8949170'], waitingTracker['-164126513'], waitingTracker['52016249'], waitingTracker['-164126511']]])
dfWaitingTracker = dfWaitingTracker.append(df, ignore_index = True)
# GET THE stateID we are in right now
currStateID = getStateID(currHod, currPhaseID, queueTracker, numVehiclesTracker, waitingTracker)
# collect the currObjValue (reward) based on the current state; larger is
# better
currObjValue = computeObjValue(queueTracker, waitingTracker)
df = pd.DataFrame([[currSod, currObjValue]])
dfObjVals = dfObjVals.append(df, ignore_index=True)
if dynamic and currSod != 0:
# CONTROL ACTION
# #print 'currPhaseID = ', currPhaseID
# #print 'currStateID = ', currStateID
# #print 'secsThisPhase = ', secsThisPhase
# #print 'currHod = ', currHod
# #print 'currSod = ', currSod
# #print 'queueTracker = ', queueTracker
# #print 'numVehiclesTracker = ', numVehiclesTracker
# #print 'waitingTracker = ', waitingTracker
# #print 'lastObjValue = ', lastObjValue
# #print 'lastAction = ', lastAction
# record the state -> action -> state combination which led to a particular
# reward in the QTable
# #print 'QCounts[lastStateID, ] before = ', QCounts[lastStateID, ]
# #print 'QAlphas[lastStateID, ] before = ', QAlphas[lastStateID, ]
# #print 'QValues[lastStateID, ] before = ', QValues[lastStateID, ]
updateQValues(lastStateID, lastAction, currStateID, lastObjValue)
# #print 'QCounts[lastStateID, ] after = ', QCounts[lastStateID, ]
# #print 'QAlphas[lastStateID, ] after = ', QAlphas[lastStateID, ]
# #print 'QValues[lastStateID, ] after = ', QValues[lastStateID, ]
# with QValues updated, update our probability distribution for picking
# certain
"""Multiple-producer-multiple-consumer signal-dispatching
dispatcher is the core of the PyDispatcher system,
providing the primary API and the core logic for the
system.
Module attributes of note:
Any -- Singleton used to signal either "Any Sender" or
"Any Signal". See documentation of the _Any class.
Anonymous -- Singleton used to signal "Anonymous Sender"
See documentation of the _Anonymous class.
Internal attributes:
WEAKREF_TYPES -- tuple of types/classes which represent
weak references to receivers, and thus must be de-
referenced on retrieval to retrieve the callable
object
connections -- { senderkey (id) : { signal : [receivers...]}}
senders -- { senderkey (id) : weakref(sender) }
used for cleaning up sender references on sender
deletion
sendersBack -- { receiverkey (id) : [senderkey (id)...] }
used for cleaning up receiver references on receiver
deletion, (considerably speeds up the cleanup process
vs. the original code.)
"""
from __future__ import generators
import weakref
from pydispatch import saferef, robustapply, errors
__author__ = "<NAME> <<EMAIL>>"
__cvsid__ = "$Id: dispatcher.py,v 1.1 2010/03/30 15:45:55 mcfletch Exp $"
__version__ = "$Revision: 1.1 $"[11:-2]
class _Parameter:
"""Used to represent default parameter values."""
def __repr__(self):
return self.__class__.__name__
class _Any(_Parameter):
"""Singleton used to signal either "Any Sender" or "Any Signal"
The Any object can be used with connect, disconnect,
send, or sendExact to signal that the parameter given
Any should react to all senders/signals, not just
a particular sender/signal.
"""
Any = _Any()
class _Anonymous(_Parameter):
"""Singleton used to signal "Anonymous Sender"
The Anonymous object is used to signal that the sender
of a message is not specified (as distinct from being
"any sender"). Registering callbacks for Anonymous
will only receive messages sent without senders. Sending
with anonymous will only send messages to those receivers
registered for Any or Anonymous.
Note:
The default sender for connect is Any, while the
default sender for send is Anonymous. This has
the effect that if you do not specify any senders
in either function then all messages are routed
as though there was a single sender (Anonymous)
being used everywhere.
"""
Anonymous = _Anonymous()
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
connections = {}
senders = {}
sendersBack = {}
def connect(receiver, signal=Any, sender=Any, weak=True):
"""Connect receiver to sender for signal
receiver -- a callable Python object which is to receive
messages/signals/events. Receivers must be hashable
objects.
if weak is True, then receiver must be weak-referencable
(more precisely saferef.safeRef() must be able to create
a reference to the receiver).
Receivers are fairly flexible in their specification,
as the machinery in the robustApply module takes care
of most of the details regarding figuring out appropriate
subsets of the sent arguments to apply to a given
receiver.
Note:
if receiver is itself a weak reference (a callable),
it will be de-referenced by the system's machinery,
so *generally* weak references are not suitable as
receivers, though some use might be found for the
facility whereby a higher-level library passes in
pre-weakrefed receiver references.
signal -- the signal to which the receiver should respond
if Any, receiver will receive any signal from the
indicated sender (which might also be Any, but is not
necessarily Any).
Otherwise must be a hashable Python object other than
None (DispatcherError raised on None).
sender -- the sender to which the receiver should respond
if Any, receiver will receive the indicated signals
from any sender.
if Anonymous, receiver will only receive indicated
signals from send/sendExact which do not specify a
sender, or specify Anonymous explicitly as the sender.
Otherwise can be any python object.
weak -- whether to use weak references to the receiver
By default, the module will attempt to use weak
references to the receiver objects. If this parameter
is false, then strong references will be used.
returns None, may raise DispatcherTypeError
"""
if signal is None:
raise errors.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender)
)
if weak:
receiver = saferef.safeRef(receiver, onDelete=_removeReceiver)
senderkey = id(sender)
if senderkey in connections:
signals = connections[senderkey]
else:
connections[senderkey] = signals = {}
# Keep track of senders for cleanup.
# Is Anonymous something we want to clean up?
if sender not in (None, Anonymous, Any):
def remove(object, senderkey=senderkey):
_removeSender(senderkey=senderkey)
# Skip objects that can not be weakly referenced, which means
# they won't be automatically cleaned up, but that's too bad.
try:
weakSender = weakref.ref(sender, remove)
senders[senderkey] = weakSender
except:
pass
receiverID = id(receiver)
# get current set, remove any current references to
# this receiver in the set, including back-references
if signal in signals:
receivers = signals[signal]
_removeOldBackRefs(senderkey, signal, receiver, receivers)
else:
receivers = signals[signal] = []
try:
current = sendersBack.get( receiverID )
if current is None:
sendersBack[ receiverID ] = current = []
if senderkey not in current:
current.append(senderkey)
except:
pass
receivers.append(receiver)
def disconnect(receiver, signal=Any, sender=Any, weak=True):
"""Disconnect receiver from sender for signal
receiver -- the registered receiver to disconnect
signal -- the registered signal to disconnect
sender -- the registered sender to disconnect
weak -- the weakref state to disconnect
disconnect reverses the process of connect,
the semantics for the individual elements are
logically equivalent to a tuple of
(receiver, signal, sender, weak) used as a key
to be deleted from the internal routing tables.
(The actual process is slightly more complex
but the semantics are basically the same).
Note:
Using disconnect is not required to cleanup
routing when an object is deleted, the framework
will remove routes for deleted objects
automatically. It's only necessary to disconnect
if you want to stop routing to a live object.
returns None, may raise DispatcherTypeError or
DispatcherKeyError
"""
if signal is None:
raise errors.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender)
)
if weak: receiver = saferef.safeRef(receiver)
senderkey = id(sender)
try:
signals = connections[senderkey]
receivers = signals[signal]
except KeyError:
raise errors.DispatcherKeyError(
"""No receivers found for signal %r from sender %r""" %(
signal,
sender
)
)
try:
# also removes from receivers
_removeOldBackRefs(senderkey, signal, receiver, receivers)
except ValueError:
raise errors.DispatcherKeyError(
"""No connection to receiver %s for signal %s from sender %s""" %(
receiver,
signal,
sender
)
)
_cleanupConnections(senderkey, signal)
def getReceivers( sender = Any, signal = Any ):
"""Get list of receivers from global tables
This utility function allows you to retrieve the
raw list of receivers from the connections table
for the given sender and signal pair.
Note:
there is no guarantee that this is the actual list
stored in the connections table, so the value
should be treated as a simple iterable/truth value
rather than, for instance a list to which you
might append new records.
Normally you would use liveReceivers( getReceivers( ...))
to retrieve the actual receiver objects as an iterable
object.
"""
try:
return connections[id(sender)][signal]
except KeyError:
return []
def liveReceivers(receivers):
"""Filter sequence of receivers to get resolved, live receivers
This is a generator which will iterate over
the passed sequence, checking for weak references
and resolving them, then returning all live
receivers.
"""
for receiver in receivers:
if isinstance( receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
yield receiver
else:
yield receiver
def getAllReceivers( sender = Any, signal = Any ):
"""Get list of all receivers from global tables
This gets all receivers which should receive
the given signal from sender, each receiver should
be produced only once by the resulting generator
"""
receivers = {}
for set in (
# Get receivers that receive *this* signal from *this* sender.
getReceivers( sender, signal ),
# Add receivers that receive *any* signal from *this* sender.
getReceivers( sender, Any ),
# Add receivers that receive *this* signal from *any* sender.
getReceivers( Any, signal ),
# Add receivers that receive *any* signal from *any* sender.
getReceivers( Any, Any ),
):
for receiver in set:
if receiver: # filter out dead instance-method weakrefs
try:
if receiver not in receivers:
receivers[receiver] = 1
yield receiver
except TypeError:
# dead weakrefs raise TypeError on hash...
pass
def send(signal=Any, sender=Anonymous, *arguments, **named):
"""Send signal from sender to all connected receivers.
signal -- (hashable) signal value, see connect for details
sender -- the sender of the signal
if Any, only receivers registered for Any will receive
the message.
if Anonymous, only receivers registered to receive
messages from Anonymous or Any will receive the message
Otherwise can be any python object (normally one
registered with a connect if you actually want
something to occur).
arguments -- positional arguments which will be passed to
*all* receivers. Note that this may raise TypeErrors
if the receivers do not allow the particular arguments.
Note also that arguments are applied before named
arguments, so they should be used with care.
named -- named arguments which will be filtered according
to the parameters of the receivers to only provide those
acceptable to the receiver.
Return a list of tuple pairs [(receiver, response), ... ]
if any receiver raises an error, the error propagates back
through send, terminating the dispatch loop, so it is quite
possible to not have all receivers called if a receiver raises an
error.
"""
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in liveReceivers(getAllReceivers(sender, signal)):
response = robustapply.robustApply(
receiver,
signal=signal,
sender=sender,
*arguments,
**named
)
responses.append((receiver, response))
return responses
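# A minimal usage sketch (the handler name and signal value below are
# illustrative, not part of this module):
#
#   from pydispatch import dispatcher
#
#   def on_update(sender, value=None):
#       print("got", value, "from", sender)
#
#   dispatcher.connect(on_update, signal="updated")
#   dispatcher.send(signal="updated", sender="worker-1", value=42)
#
# send() returns [(receiver, response), ...]; robustApply passes each receiver
# only the named arguments it can actually accept, so on_update never sees
# the `signal` keyword it did not declare.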
def sendExact( signal=Any, sender=Anonymous, *arguments, **named ):
"""Send signal only to those receivers registered for exact message
sendExact allows for avoiding Any/Anonymous registered
handlers, sending only to those receivers explicitly
registered for a particular signal on a particular
sender.
"""
responses = []
for receiver in liveReceivers(getReceivers(sender, signal)):
response = robustapply.robustApply(
receiver,
signal=signal,
sender=sender,
*arguments,
**named
)
responses.append((receiver, response))
return responses
def _removeReceiver(receiver):
"""Remove receiver from connections."""
if not sendersBack:
# During module cleanup the mapping will
not hasattr(self.dll, "IW_IncrementTime"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_IncrementTime")
)
# check that date is valid
self._validate_iwfm_date(date_string)
# check that time_interval is a valid IWFM time_interval
self._validate_time_interval(time_interval)
# convert date_string to ctypes character array
date_string = ctypes.create_string_buffer(date_string.encode("utf-8"))
# get length of iwfm_date string
len_date_string = ctypes.c_int(ctypes.sizeof(date_string))
# convert time_interval to ctypes character array
time_interval = ctypes.create_string_buffer(time_interval.encode("utf-8"))
# get length of time_interval
len_time_interval = ctypes.c_int(ctypes.sizeof(time_interval))
# convert n_intervals to ctypes
n_intervals = ctypes.c_int(n_intervals)
# initialize output variables
status = ctypes.c_int(-1)
self.dll.IW_IncrementTime(
ctypes.byref(len_date_string),
date_string,
ctypes.byref(len_time_interval),
time_interval,
ctypes.byref(n_intervals),
ctypes.byref(status),
)
return date_string.value.decode("utf-8")
def is_date_greater(self, first_date, comparison_date):
"""returns True if first_date is greater than comparison_date
Parameters
----------
first_date : str
IWFM date format MM/DD/YYYY_HH:MM
comparison_date : str
IWFM date format MM/DD/YYYY_HH:MM
Returns
-------
boolean
True if first_date is greater (in the future) when compared to the comparison_date
False if first_date is less (in the past) when compared to the comparison_date
See Also
--------
IWFMModel.get_current_date_and_time : returns the current simulation date and time
IWFMModel.get_n_time_steps : returns the number of timesteps in an IWFM simulation
IWFMModel.get_time_specs : returns the IWFM simulation dates and time step
IWFMModel.get_n_intervals : returns the number of time intervals between a provided start date and end date
IWFMModel.get_output_interval : returns a list of the possible time intervals a selected time-series data can be retrieved at.
IWFMModel.increment_time : increments the date provided by the specified time interval
Examples
--------
>>> from pywfm import IWFMModel
>>> dll = '../../DLL/Bin/IWFM2015_C_x64.dll'
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, preprocessor_infile, simulation_infile)
>>> model.is_date_greater('09/30/2011_24:00', '10/01/2011_24:00')
False
>>> model.kill()
>>> from pywfm import IWFMModel
>>> dll = '../../DLL/Bin/IWFM2015_C_x64.dll'
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, preprocessor_infile, simulation_infile)
>>> model.is_date_greater('03/28/2001_24:00', '06/30/1989_24:00')
True
>>> model.kill()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_IsTimeGreaterThan"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_IsTimeGreaterThan")
)
# check that first_date is valid
self._validate_iwfm_date(first_date)
# check that comparison_date is valid
self._validate_iwfm_date(comparison_date)
# convert begin and end dates to ctypes character arrays
first_date = ctypes.create_string_buffer(first_date.encode("utf-8"))
comparison_date = ctypes.create_string_buffer(comparison_date.encode("utf-8"))
# set length of IWFM date
length_dates = ctypes.c_int(ctypes.sizeof(first_date))
# initialize output variables
compare_result = ctypes.c_int(0)
status = ctypes.c_int(-1)
self.dll.IW_IsTimeGreaterThan(
ctypes.byref(length_dates),
first_date,
comparison_date,
ctypes.byref(compare_result),
ctypes.byref(status),
)
if compare_result.value == -1:
is_greater = False
elif compare_result.value == 1:
is_greater = True
return is_greater
def set_log_file(self, file_name="message.log"):
"""opens a text log file to print out error and warning messages
Parameters
----------
file_name : str, default='message.log'
name of the log file used to write error and warning messages
Returns
-------
None
opens the log file
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_SetLogFile"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_SetLogFile")
)
# convert file_name to ctypes character array
file_name = ctypes.create_string_buffer(file_name.encode("utf-8"))
# get length of file_name
len_file_name = ctypes.c_int(ctypes.sizeof(file_name))
# initialize output variables
status = ctypes.c_int(-1)
self.dll.IW_SetLogFile(
ctypes.byref(len_file_name), file_name, ctypes.byref(status)
)
def close_log_file(self):
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_CloseLogFile"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_CloseLogFile")
)
# initialize output variables
status = ctypes.c_int(-1)
self.dll.IW_CloseLogFile(ctypes.byref(status))
def get_last_message(self):
"""the error message in case a procedure call from IWFM API
returns an error code (status) other than 0
Returns
-------
str
error message for the procedure if it returns an error code
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_GetLastMessage"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_GetLastMessage")
)
# set length of last_message to 500
length_message = ctypes.c_int(500)
# character array
last_message = ctypes.create_string_buffer(length_message.value)
# initialize output variables
status = ctypes.c_int(-1)
self.dll.IW_GetLastMessage(
ctypes.byref(length_message), last_message, ctypes.byref(status)
)
return last_message.value.decode("utf-8")
def log_last_message(self):
"""prints the last error message (generated when a procedure call
from IWFM API returns an error code (status) other than 0) to the
message log file
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_GetLastMessage"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_GetLastMessage")
)
# initialize output variables
status = ctypes.c_int(-1)
self.dll.IW_LogLastMessage(ctypes.byref(status))
def _is_time_interval_greater_or_equal(
self, time_interval, simulation_time_interval
):
"""determines if a provided time_interval is greater than or
equal to the simulation_time_interval
Parameters
----------
time_interval : str
valid IWFM time interval to compare with the simulation time
interval
simulation_time_interval : str
valid IWFM time interval representing the simulation time
interval
Returns
-------
boolean
True if time interval is greater than or equal to
simulation time interval, otherwise False
"""
# validate time interval
self._validate_time_interval(time_interval)
# validate simulation_time_interval
self._validate_time_interval(simulation_time_interval)
# list of valid time intervals
_valid_time_intervals = [
"1MIN",
"2MIN",
"3MIN",
"4MIN",
"5MIN",
"10MIN",
"15MIN",
"20MIN",
"30MIN",
"1HOUR",
"2HOUR",
"3HOUR",
"4HOUR",
"6HOUR",
"8HOUR",
"12HOUR",
"1DAY",
"1WEEK",
"1MON",
"1YEAR",
]
index_time_interval = _valid_time_intervals.index(time_interval)
index_simulation_interval = _valid_time_intervals.index(
simulation_time_interval
)
if index_time_interval >= index_simulation_interval:
return True
else:
return False
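# e.g. the comparison is purely positional in the ordered list above:
#   _is_time_interval_greater_or_equal("1DAY", "1HOUR")  -> True
#   _is_time_interval_greater_or_equal("30MIN", "1DAY")  -> False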
def _validate_iwfm_date(self, dt):
"""performs validation that a provided value is an IWFM-format date string based on
string length and format MM/DD/YYYY_HH:MM
Parameters
----------
dt : str
input value to check if IWFM-format date
Returns
-------
None
raises errors if validation checks do not pass
"""
if not isinstance(dt, str):
raise TypeError("IWFM dates must be a string")
if len(dt) != 16:
raise ValueError(
"IWFM dates must be 16 characters in length and of format MM/DD/YYYY_HH:MM"
)
if "_" not in dt or dt.index("_") != 10:
raise ValueError("IWFM dates must have an '_' separating the date and time")
if ":" not in dt or dt.index(":") != 13:
raise ValueError(
"IWFM dates must have an ':' separating the hours from minutes"
)
if dt[2] != "/" or dt[5] != "/":
raise ValueError(
"IWFM dates must use '/' as separators for the month, day, year in the date"
)
try:
datetime.datetime.strptime(dt, "%m/%d/%Y_%H:%M")
except ValueError:
try:
datetime.datetime.strptime(dt.split("_")[0], "%m/%d/%Y")
except ValueError:
raise ValueError(
"Value provided: {} could not be converted to a date".format(dt)
)
else:
try:
hour = int(dt[11:13])
minute = int(dt[14:])
except:
raise ValueError("hour or minute values are not numbers")
else:
if hour < 0 or hour > 24:
raise ValueError("hour value is not between 00 and 24")
if minute < 0 or minute > 59:
raise ValueError("minute value is not between 00 and 59")
if hour == 24 and minute > 0:
raise ValueError("days cannot exceed 24:00 hours")
def _validate_time_interval(self, time_interval):
"""performs validation that a provided value is an IWFM-format time-interval string
Parameters
----------
time_interval : str (not case-sensitive)
input value to check if IWFM-format time-interval
Returns
-------
None
raises errors if validation checks do not pass
"""
# check input type is a string
if not isinstance(time_interval, str):
raise TypeError(
"time_interval must be a string. type entered is {}.".format(
type(time_interval)
)
)
# list of valid time intervals
_valid_time_intervals = [
"1MIN",
"2MIN",
"3MIN",
"4MIN",
"5MIN",
"10MIN",
"15MIN",
"20MIN",
"30MIN",
"1HOUR",
"2HOUR",
"3HOUR",
"4HOUR",
"6HOUR",
"8HOUR",
"12HOUR",
"1DAY",
"1WEEK",
"1MON",
"1YEAR",
]
if time_interval.upper() not in _valid_time_intervals:
error_string = (
"time_interval entered is not a valid IWFM time interval.\n"
+ "time_interval must be:\n\t-{}".format(
"\n\t-".join(_valid_time_intervals)
)
)
raise ValueError(error_string)
def _string_to_list_by_array(
self, in_string, starting_position_array, length_output_list
):
"""converts a string to a list of strings based on an
array of the starting character position (index).
Parameters
----------
in_string : str or ctypes.Array (character array)
input string that is converted to list of strings
starting_position_array : np.array, list of ints, ctypes.Array
array of starting character index for each value in list.
length_output_list : int, ctypes.c_int, ctypes.c_long
number of non-placeholder values in starting_position_array
Returns
-------
list of strings
input string
0] == pytest.approx(0.0, 1.0e-4)
assert deflections[0, 1] == pytest.approx(factor * 0.38209715, 1.0e-4)
deflections = truncated_nfw.deflections_from_grid(
grid=aa.grid_irregular.manual_1d([[1.0, 1.0]])
)
factor = (4.0 * 1.0 * 1.0) / (np.sqrt(2) / 1.0)
assert deflections[0, 0] == pytest.approx(
(1.0 / np.sqrt(2)) * factor * 0.3125838, 1.0e-4
)
assert deflections[0, 1] == pytest.approx(
(1.0 / np.sqrt(2)) * factor * 0.3125838, 1.0e-4
)
truncated_nfw = aast.mp.SphericalTruncatedNFW(
centre=(0.0, 0.0), kappa_s=2.0, scale_radius=1.0, truncation_radius=2.0
)
deflections = truncated_nfw.deflections_from_grid(
grid=aa.grid_irregular.manual_1d([[2.0, 0.0]])
)
factor = (4.0 * 2.0 * 1.0) / (2.0 / 1.0)
assert deflections[0, 0] == pytest.approx(factor * 0.38209715, 1.0e-4)
assert deflections[0, 1] == pytest.approx(0.0, 1.0e-4)
truncated_nfw = aast.mp.SphericalTruncatedNFW(
centre=(0.0, 0.0), kappa_s=1.0, scale_radius=4.0, truncation_radius=2.0
)
deflections = truncated_nfw.deflections_from_grid(
grid=aa.coordinates([[(2.0, 0.0)]])
)
assert deflections[0][0][0] == pytest.approx(2.1702661386, 1.0e-4)
assert deflections[0][0][1] == pytest.approx(0.0, 1.0e-4)
def test__compare_nfw_and_truncated_nfw_with_large_truncation_radius__convergence_and_deflections_identical(
self
):
truncated_nfw = aast.mp.SphericalTruncatedNFW(
centre=(0.0, 0.0), kappa_s=1.0, scale_radius=4.0, truncation_radius=50000.0
)
nfw = aast.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=4.0)
truncated_nfw_convergence = truncated_nfw.convergence_from_grid(
grid=aa.grid_irregular.manual_1d([[2.0, 2.0], [3.0, 1.0], [-1.0, -9.0]])
)
nfw_convergence = nfw.convergence_from_grid(
grid=aa.grid_irregular.manual_1d([[2.0, 2.0], [3.0, 1.0], [-1.0, -9.0]])
)
assert truncated_nfw_convergence == pytest.approx(nfw_convergence, 1.0e-4)
truncated_nfw_deflections = truncated_nfw.deflections_from_grid(
grid=aa.grid_irregular.manual_1d([[2.0, 2.0], [3.0, 1.0], [-1.0, -9.0]])
)
nfw_deflections = nfw.deflections_from_grid(
grid=aa.grid_irregular.manual_1d([[2.0, 2.0], [3.0, 1.0], [-1.0, -9.0]])
)
assert truncated_nfw_deflections == pytest.approx(nfw_deflections, 1.0e-4)
def test__deflections_of_spherical_profile__dont_use_interpolate_and_cache_decorators(
self
):
truncated_nfw = aast.mp.SphericalTruncatedNFW(
centre=(-0.7, 0.5), kappa_s=1.0, scale_radius=8.0, truncation_radius=2.0
)
mask = np.array(
[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, True, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
]
)
mask = aa.mask.manual(mask, pixel_scales=(1.0, 1.0), sub_size=1)
grid = aa.masked.grid.from_mask(mask=mask)
regular_with_interp = grid.new_grid_with_interpolator(
pixel_scale_interpolation_grid=0.5
)
interp_deflections = truncated_nfw.deflections_from_grid(
grid=regular_with_interp
)
interpolator = grids.Interpolator.from_mask_grid_and_pixel_scale_interpolation_grids(
mask=mask, grid=grid, pixel_scale_interpolation_grid=0.5
)
interp_deflections_values = truncated_nfw.deflections_from_grid(
grid=interpolator.interp_grid
)
interp_deflections_manual_y = interpolator.interpolated_values_from_values(
values=interp_deflections_values[:, 0]
)
interp_deflections_manual_x = interpolator.interpolated_values_from_values(
values=interp_deflections_values[:, 1]
)
assert (interp_deflections_manual_y != interp_deflections[:, 0]).all()
assert (interp_deflections_manual_x != interp_deflections[:, 1]).all()
def test__mass_at_truncation_radius__values(self):
truncated_nfw = aast.mp.SphericalTruncatedNFW(
centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0, truncation_radius=1.0
)
cosmology = mock_cosmology.MockCosmology(
arcsec_per_kpc=1.0,
kpc_per_arcsec=1.0,
critical_surface_density=1.0,
cosmic_average_density=1.0,
)
mass_at_truncation_radius = truncated_nfw.mass_at_truncation_radius(
redshift_profile=0.5,
redshift_source=1.0,
unit_length="arcsec",
unit_mass="solMass",
cosmology=cosmology,
)
assert mass_at_truncation_radius == pytest.approx(0.00009792581, 1.0e-5)
# truncated_nfw = aast.mp.SphericalTruncatedNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0,
# truncation_radius=1.0)
#
# cosmology = mock_cosmology.MockCosmology(arcsec_per_kpc=1.0, kpc_per_arcsec=1.0, critical_surface_density=2.0,
# cosmic_average_density=3.0)
#
# mass_at_truncation_radius = truncated_nfw.mass_at_truncation_radius(redshift_lens=0.5, redshift_source=1.0,
# unit_length='arcsec', unit_mass='solMass', cosmology=cosmology)
#
# assert mass_at_truncation_radius == pytest.approx(0.00008789978, 1.0e-5)
#
# truncated_nfw = aast.mp.SphericalTruncatedNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=2.0,
# truncation_radius=1.0)
#
# mass_at_truncation_radius = truncated_nfw.mass_at_truncation_radius(redshift_lens=0.5, redshift_source=1.0,
# unit_length='arcsec', unit_mass='solMass', cosmology=cosmology)
#
# assert mass_at_truncation_radius == pytest.approx(0.0000418378, 1.0e-5)
#
# truncated_nfw = aast.mp.SphericalTruncatedNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=8.0,
# truncation_radius=4.0)
#
# mass_at_truncation_radius = truncated_nfw.mass_at_truncation_radius(redshift_lens=0.5, redshift_source=1.0,
# unit_length='arcsec', unit_mass='solMass', cosmology=cosmology)
#
# assert mass_at_truncation_radius == pytest.approx(0.0000421512, 1.0e-4)
#
# truncated_nfw = aast.mp.SphericalTruncatedNFW(centre=(0.0, 0.0), kappa_s=2.0, scale_radius=8.0,
# truncation_radius=4.0)
#
# mass_at_truncation_radius = truncated_nfw.mass_at_truncation_radius(redshift_lens=0.5, redshift_source=1.0,
# unit_length='arcsec', unit_mass='solMass', cosmology=cosmology)
#
# assert mass_at_truncation_radius == pytest.approx(0.00033636625, 1.0e-4)
def test_summarize_in_units(self):
test_path = "{}/../../test_files/config/summary".format(
os.path.dirname(os.path.realpath(__file__))
)
af.conf.instance = af.conf.Config(config_path=test_path)
cosmology = cosmo.LambdaCDM(H0=70.0, Om0=0.3, Ode0=0.7)
nfw = aast.mp.SphericalTruncatedNFW(
kappa_s=0.5, scale_radius=5.0, truncation_radius=10.0
)
summary_text = nfw.summarize_in_units(
radii=[aast.dim.Length(10.0), aast.dim.Length(500.0)],
prefix="nfw_",
unit_length="kpc",
unit_mass="solMass",
redshift_profile=0.6,
redshift_source=2.5,
redshift_of_cosmic_average_density="profile",
whitespace=50,
cosmology=cosmology,
)
i = 0
assert summary_text[i] == "Mass Profile = SphericalTruncatedNFW\n"
i += 1
assert (
summary_text[i]
== "nfw_einstein_radius 15.36 kpc"
)
i += 1
assert (
summary_text[i]
== "nfw_einstein_mass 1.4377e+12 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_within_10.00_kpc 5.2061e+12 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_within_500.00_kpc 7.3287e+12 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_rho_at_scale_radius 29027857.02 solMass/kpc3"
)
i += 1
assert (
summary_text[i]
== "nfw_delta_concentration 110665.28"
)
i += 1
assert (
summary_text[i] == "nfw_concentration 14.40"
)
i += 1
assert (
summary_text[i]
== "nfw_radius_at_200x_cosmic_density 481.41 kpc"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_at_200x_cosmic_density 2.4517e+13 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_at_truncation_radius 1.3190e+13 solMass"
)
i += 1
def test__outputs_are_autoarrays(self):
grid = aa.grid.uniform(shape_2d=(2, 2), pixel_scales=1.0, sub_size=1)
truncated_nfw = aast.mp.SphericalTruncatedNFW()
convergence = truncated_nfw.convergence_from_grid(grid=grid)
assert convergence.shape_2d == (2, 2)
potential = truncated_nfw.potential_from_grid(grid=grid)
assert potential.shape_2d == (2, 2)
deflections = truncated_nfw.deflections_from_grid(grid=grid)
assert deflections.shape_2d == (2, 2)
class TestTruncatedNFWChallenge:
def test__constructor_and_units(self):
truncated_nfw = aast.mp.SphericalTruncatedNFWChallenge(
centre=(1.0, 2.0), kappa_s=2.0, scale_radius=10.0
)
assert truncated_nfw.centre == (1.0, 2.0)
assert isinstance(truncated_nfw.centre[0], aast.dim.Length)
assert isinstance(truncated_nfw.centre[1], aast.dim.Length)
assert truncated_nfw.centre[0].unit == "arcsec"
assert truncated_nfw.centre[1].unit == "arcsec"
assert truncated_nfw.axis_ratio == 1.0
assert isinstance(truncated_nfw.axis_ratio, float)
assert truncated_nfw.phi == 0.0
assert isinstance(truncated_nfw.phi, float)
assert truncated_nfw.kappa_s == 2.0
assert isinstance(truncated_nfw.kappa_s, float)
assert truncated_nfw.inner_slope == 1.0
assert isinstance(truncated_nfw.inner_slope, float)
assert truncated_nfw.scale_radius == 10.0
assert isinstance(truncated_nfw.scale_radius, aast.dim.Length)
assert truncated_nfw.scale_radius.unit_length == "arcsec"
assert truncated_nfw.truncation_radius == pytest.approx(
2.0 * 189.26967095554755, 1.0e-4
)
assert isinstance(truncated_nfw.truncation_radius, aast.dim.Length)
assert truncated_nfw.truncation_radius.unit_length == "arcsec"
class TestTruncatedNFWMassToConcentration:
def test__mass_and_concentration_consistent_with_normal_truncated_nfw(self):
cosmology = cosmo.FlatLambdaCDM(H0=70.0, Om0=0.3)
truncated_nfw_mass = aast.mp.SphericalTruncatedNFWMassToConcentration(
centre=(1.0, 2.0), mass_at_200=1.0e9
)
mass_at_200_via_mass = truncated_nfw_mass.mass_at_200_for_units(
unit_mass="solMass",
unit_length="arcsec",
redshift_object=0.6,
redshift_source=2.5,
cosmology=cosmology,
)
concentration_via_mass = truncated_nfw_mass.concentration_for_units(
unit_mass="solMass",
unit_length="arcsec",
redshift_profile=0.6,
redshift_source=2.5,
cosmology=cosmology,
)
truncated_nfw_kappa_s = aast.mp.SphericalTruncatedNFW(
centre=(1.0, 2.0),
kappa_s=truncated_nfw_mass.kappa_s,
scale_radius=truncated_nfw_mass.scale_radius,
truncation_radius=truncated_nfw_mass.truncation_radius,
)
mass_at_200_via_kappa_s = truncated_nfw_kappa_s.mass_at_200_for_units(
unit_mass="solMass",
unit_length="arcsec",
redshift_object=0.6,
redshift_source=2.5,
cosmology=cosmology,
)
concentration_via_kappa_s = truncated_nfw_kappa_s.concentration_for_units(
unit_mass="solMass",
unit_length="arcsec",
redshift_profile=0.6,
redshift_source=2.5,
cosmology=cosmology,
)
# We are using the SphericalTruncatedNFW to check that the mass gives a consistent kappa_s, given certain radii.
assert mass_at_200_via_kappa_s == mass_at_200_via_mass
assert concentration_via_kappa_s == concentration_via_mass
assert isinstance(truncated_nfw_mass.kappa_s, float)
assert truncated_nfw_mass.centre == (1.0, 2.0)
assert isinstance(truncated_nfw_mass.centre[0], aast.dim.Length)
assert isinstance(truncated_nfw_mass.centre[1], aast.dim.Length)
assert truncated_nfw_mass.centre[0].unit == "arcsec"
assert truncated_nfw_mass.centre[1].unit == "arcsec"
assert truncated_nfw_mass.axis_ratio == 1.0
assert isinstance(truncated_nfw_mass.axis_ratio, float)
assert truncated_nfw_mass.phi == 0.0
assert isinstance(truncated_nfw_mass.phi, float)
assert truncated_nfw_mass.inner_slope == 1.0
assert isinstance(truncated_nfw_mass.inner_slope, float)
assert truncated_nfw_mass.scale_radius == pytest.approx(0.193017, 1.0e-4)
assert isinstance(truncated_nfw_mass.scale_radius, aast.dim.Length)
assert truncated_nfw_mass.scale_radius.unit_length == "arcsec"
assert truncated_nfw_mass.truncation_radius == pytest.approx(
33.1428053449, 1.0e-4
)
assert isinstance(truncated_nfw_mass.truncation_radius, aast.dim.Length)
assert truncated_nfw_mass.truncation_radius.unit_length == "arcsec"
def test_summarize_in_units(self):
test_path = "{}/../../test_files/config/summary".format(
os.path.dirname(os.path.realpath(__file__))
)
af.conf.instance = af.conf.Config(config_path=test_path)
cosmology = cosmo.LambdaCDM(H0=70.0, Om0=0.3, Ode0=0.7)
nfw = aast.mp.SphericalTruncatedNFW(
kappa_s=0.5, scale_radius=5.0, truncation_radius=10.0
)
summary_text = nfw.summarize_in_units(
radii=[aast.dim.Length(10.0), aast.dim.Length(500.0)],
prefix="nfw_",
unit_length="kpc",
unit_mass="solMass",
redshift_profile=0.6,
redshift_source=2.5,
redshift_of_cosmic_average_density="profile",
whitespace=50,
cosmology=cosmology,
)
i = 0
assert summary_text[i] == "Mass Profile = SphericalTruncatedNFW\n"
i += 1
assert (
summary_text[i]
== "nfw_einstein_radius 15.36 kpc"
)
i += 1
assert (
summary_text[i]
== "nfw_einstein_mass 1.4377e+12 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_within_10.00_kpc 5.2061e+12 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_within_500.00_kpc 7.3287e+12 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_rho_at_scale_radius 29027857.02 solMass/kpc3"
)
i += 1
assert (
summary_text[i]
== "nfw_delta_concentration 110665.28"
)
i += 1
assert (
summary_text[i] == "nfw_concentration 14.40"
)
i += 1
assert (
summary_text[i]
== "nfw_radius_at_200x_cosmic_density 481.41 kpc"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_at_200x_cosmic_density 2.4517e+13 solMass"
)
i += 1
assert (
summary_text[i]
== "nfw_mass_at_truncation_radius 1.3190e+13 solMass"
)
i += 1
def test__outputs_are_autoarrays(self):
grid = aa.grid.uniform(shape_2d=(2, 2), pixel_scales=1.0, sub_size=1)
truncated_nfw = aast.mp.SphericalTruncatedNFW()
convergence = truncated_nfw.convergence_from_grid(grid=grid)
assert convergence.shape_2d == (2, 2)
potential = truncated_nfw.potential_from_grid(grid=grid)
assert potential.shape_2d == (2, 2)
deflections = truncated_nfw.deflections_from_grid(grid=grid)
assert deflections.shape_2d == (2, 2)
class TestNFW:
def test__constructor_and_units(self):
nfw = aast.mp.EllipticalNFW(
centre=(1.0, 2.0), axis_ratio=0.5, phi=45.0, kappa_s=2.0, scale_radius=10.0
)
assert nfw.centre == (1.0, 2.0)
assert isinstance(nfw.centre[0], aast.dim.Length)
assert isinstance(nfw.centre[1], aast.dim.Length)
assert nfw.centre[0].unit == "arcsec"
assert nfw.centre[1].unit == "arcsec"
assert nfw.axis_ratio == 0.5
assert isinstance(nfw.axis_ratio, float)
assert nfw.phi == 45.0
assert isinstance(nfw.phi, float)
assert nfw.kappa_s == 2.0
assert isinstance(nfw.kappa_s, float)
assert nfw.inner_slope == 1.0
assert isinstance(nfw.inner_slope, float)
assert nfw.scale_radius == 10.0
assert isinstance(nfw.scale_radius, aast.dim.Length)
assert nfw.scale_radius.unit_length == "arcsec"
nfw = aast.mp.SphericalNFW(centre=(1.0, 2.0), kappa_s=2.0, scale_radius=10.0)
assert nfw.centre == (1.0, 2.0)
assert isinstance(nfw.centre[0], aast.dim.Length)
assert isinstance(nfw.centre[1], aast.dim.Length)
assert nfw.centre[0].unit == "arcsec"
assert nfw.centre[1].unit == "arcsec"
assert nfw.axis_ratio == 1.0
assert isinstance(nfw.axis_ratio, float)
assert nfw.phi == 0.0
assert isinstance(nfw.phi, float)
assert nfw.kappa_s == 2.0
assert isinstance(nfw.kappa_s, float)
assert nfw.inner_slope == 1.0
assert isinstance(nfw.inner_slope, float)
assert nfw.scale_radius == 10.0
assert isinstance(nfw.scale_radius, aast.dim.Length)
assert nfw.scale_radius.unit_length == "arcsec"
def test__convergence_correct_values(self):
# r = 2.0 (> 1.0)
# F(r) = (1/sqrt(3)) * atan(sqrt(3)) = 0.60459978807
# kappa(r) = 2 * kappa_s * (1 - 0.60459978807) / (4-1) = 0.263600141
nfw = aast.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0)
assert nfw.convergence_from_grid(
grid=aa.grid_irregular.manual_1d([[2.0, 0.0]])
) == pytest.approx(0.263600141, 1e-3)
nfw = aast.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0)
assert nfw.convergence_from_grid(
grid=aa.grid_irregular.manual_1d([[0.5, 0.0]])
) == pytest.approx(1.388511, 1e-3)
nfw = aast.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=2.0, scale_radius=1.0)
assert nfw.convergence_from_grid(
grid=aa.grid_irregular.manual_1d([[0.5, 0.0]])
) == pytest.approx(2.0 * 1.388511, 1e-3)
nfw = aast.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=2.0)
assert nfw.convergence_from_grid(
grid=aa.grid_irregular.manual_1d([[1.0, 0.0]])
) == pytest.approx(1.388511, 1e-3)
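# Not part of the test suite: a minimal sketch of the spherical NFW convergence used
# in the hand-computed comments above, kappa(x) = 2 * kappa_s * (1 - F(x)) / (x**2 - 1)
# with x = r / scale_radius and F(x) = arctan(sqrt(x**2 - 1)) / sqrt(x**2 - 1) for x > 1
# (arctanh for x < 1). The helper name is an assumption for illustration only.
import numpy as np

def nfw_convergence_sketch(r, kappa_s=1.0, scale_radius=1.0):
    x = r / scale_radius
    if x == 1.0:
        return 2.0 * kappa_s / 3.0                     # limiting value at x = 1
    if x > 1.0:
        f = np.arctan(np.sqrt(x ** 2 - 1.0)) / np.sqrt(x ** 2 - 1.0)
    else:
        f = np.arctanh(np.sqrt(1.0 - x ** 2)) / np.sqrt(1.0 - x ** 2)
    return 2.0 * kappa_s * (1.0 - f) / (x ** 2 - 1.0)

# nfw_convergence_sketch(2.0) ~= 0.2636 and nfw_convergence_sketch(0.5) ~= 1.3885,
# matching the values asserted in the convergence test above.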
# Source: Qointum/pypy
import py, weakref
from rpython.jit.backend import model
from rpython.jit.backend.llgraph import support
from rpython.jit.backend.llsupport import symbolic
from rpython.jit.metainterp.history import AbstractDescr
from rpython.jit.metainterp.history import Const, getkind
from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp.optimizeopt import intbounds
from rpython.jit.codewriter import longlong, heaptracker
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.rtyper.llinterp import LLInterpreter, LLException
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr
from rpython.rtyper import rclass
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong
class LLTrace(object):
has_been_freed = False
invalid = False
def __init__(self, inputargs, operations):
# We need to clone the list of operations because the
# front-end will mutate them under our feet again. We also
# need to make sure things get freed.
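# Note on the mutable default below: because `mapping` is re-defined on every
# LLTrace construction, its `_cache={}` default gives each trace one shared
# old-box -> cloned-box memo, so the same input box always maps to the same
# clone across inputargs and operations.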
def mapping(box, _cache={}):
if isinstance(box, Const) or box is None:
return box
try:
newbox = _cache[box]
except KeyError:
newbox = _cache[box] = box.__class__()
return newbox
#
self.inputargs = map(mapping, inputargs)
self.operations = []
for op in operations:
if op.getdescr() is not None:
if op.is_guard() or op.getopnum() == rop.FINISH:
newdescr = op.getdescr()
else:
newdescr = WeakrefDescr(op.getdescr())
else:
newdescr = None
newop = op.copy_and_change(op.getopnum(),
map(mapping, op.getarglist()),
mapping(op.result),
newdescr)
if op.getfailargs() is not None:
newop.setfailargs(map(mapping, op.getfailargs()))
self.operations.append(newop)
class WeakrefDescr(AbstractDescr):
def __init__(self, realdescr):
self.realdescrref = weakref.ref(realdescr)
self.final_descr = getattr(realdescr, 'final_descr', False)
class ExecutionFinished(Exception):
def __init__(self, deadframe):
self.deadframe = deadframe
class Jump(Exception):
def __init__(self, jump_target, args):
self.jump_target = jump_target
self.args = args
class CallDescr(AbstractDescr):
def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI):
self.RESULT = RESULT
self.ARGS = ARGS
self.ABI = ABI
self.extrainfo = extrainfo
def __repr__(self):
return 'CallDescr(%r, %r, %r)' % (self.RESULT, self.ARGS,
self.extrainfo)
def get_extra_info(self):
return self.extrainfo
def get_arg_types(self):
return ''.join([getkind(ARG)[0] for ARG in self.ARGS])
def get_result_type(self):
return getkind(self.RESULT)[0]
class SizeDescr(AbstractDescr):
def __init__(self, S):
self.S = S
def as_vtable_size_descr(self):
return self
def count_fields_if_immutable(self):
return heaptracker.count_fields_if_immutable(self.S)
def __repr__(self):
return 'SizeDescr(%r)' % (self.S,)
class FieldDescr(AbstractDescr):
def __init__(self, S, fieldname):
self.S = S
self.fieldname = fieldname
self.FIELD = getattr(S, fieldname)
def get_vinfo(self):
return self.vinfo
def __repr__(self):
return 'FieldDescr(%r, %r)' % (self.S, self.fieldname)
def sort_key(self):
return self.fieldname
def is_pointer_field(self):
return getkind(self.FIELD) == 'ref'
def is_float_field(self):
return getkind(self.FIELD) == 'float'
def is_field_signed(self):
return _is_signed_kind(self.FIELD)
def is_integer_bounded(self):
return getkind(self.FIELD) == 'int' \
and rffi.sizeof(self.FIELD) < symbolic.WORD
def get_integer_min(self):
if getkind(self.FIELD) != 'int':
assert False
return intbounds.get_integer_min(
not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
def get_integer_max(self):
if getkind(self.FIELD) != 'int':
assert False
return intbounds.get_integer_max(
not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
def _is_signed_kind(TYPE):
return (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and
rffi.cast(TYPE, -1) == -1)
class ArrayDescr(AbstractDescr):
def __init__(self, A):
self.A = self.OUTERA = A
if isinstance(A, lltype.Struct):
self.A = A._flds[A._arrayfld]
def __repr__(self):
return 'ArrayDescr(%r)' % (self.OUTERA,)
def is_array_of_pointers(self):
return getkind(self.A.OF) == 'ref'
def is_array_of_floats(self):
return getkind(self.A.OF) == 'float'
def is_item_signed(self):
return _is_signed_kind(self.A.OF)
def is_array_of_structs(self):
return isinstance(self.A.OF, lltype.Struct)
def is_item_integer_bounded(self):
return getkind(self.A.OF) == 'int' \
and rffi.sizeof(self.A.OF) < symbolic.WORD
def get_item_integer_min(self):
if getkind(self.A.OF) != 'int':
assert False
return intbounds.get_integer_min(
not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF))
def get_item_integer_max(self):
if getkind(self.A.OF) != 'int':
assert False
return intbounds.get_integer_max(
not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF))
class InteriorFieldDescr(AbstractDescr):
def __init__(self, A, fieldname):
self.A = A
self.fieldname = fieldname
self.FIELD = getattr(A.OF, fieldname)
def __repr__(self):
return 'InteriorFieldDescr(%r, %r)' % (self.A, self.fieldname)
def sort_key(self):
return self.fieldname
def is_pointer_field(self):
return getkind(self.FIELD) == 'ref'
def is_float_field(self):
return getkind(self.FIELD) == 'float'
def is_integer_bounded(self):
return getkind(self.FIELD) == 'int' \
and rffi.sizeof(self.FIELD) < symbolic.WORD
def get_integer_min(self):
if getkind(self.FIELD) != 'int':
assert False
return intbounds.get_integer_min(
not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
def get_integer_max(self):
if getkind(self.FIELD) != 'int':
assert False
return intbounds.get_integer_max(
not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
_example_res = {'v': None,
'r': lltype.nullptr(llmemory.GCREF.TO),
'i': 0,
'f': 0.0}
class LLGraphCPU(model.AbstractCPU):
from rpython.jit.metainterp.typesystem import llhelper as ts
supports_floats = True
supports_longlong = r_uint is not r_ulonglong
supports_singlefloats = True
translate_support_code = False
is_llgraph = True
def __init__(self, rtyper, stats=None, *ignored_args, **kwds):
model.AbstractCPU.__init__(self)
self.rtyper = rtyper
self.llinterp = LLInterpreter(rtyper)
self.descrs = {}
class MiniStats:
pass
self.stats = stats or MiniStats()
self.vinfo_for_tests = kwds.get('vinfo_for_tests', None)
def compile_loop(self, inputargs, operations, looptoken, jd_id=0,
unique_id=0, log=True, name='', logger=None):
clt = model.CompiledLoopToken(self, looptoken.number)
looptoken.compiled_loop_token = clt
lltrace = LLTrace(inputargs, operations)
clt._llgraph_loop = lltrace
clt._llgraph_alltraces = [lltrace]
self._record_labels(lltrace)
def compile_bridge(self, faildescr, inputargs, operations,
original_loop_token, log=True, logger=None):
clt = original_loop_token.compiled_loop_token
clt.compiling_a_bridge()
lltrace = LLTrace(inputargs, operations)
faildescr._llgraph_bridge = lltrace
clt._llgraph_alltraces.append(lltrace)
self._record_labels(lltrace)
def _record_labels(self, lltrace):
for i, op in enumerate(lltrace.operations):
if op.getopnum() == rop.LABEL:
_getdescr(op)._llgraph_target = (lltrace, i)
def invalidate_loop(self, looptoken):
for trace in looptoken.compiled_loop_token._llgraph_alltraces:
trace.invalid = True
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
oldtrace = oldlooptoken.compiled_loop_token._llgraph_loop
newtrace = newlooptoken.compiled_loop_token._llgraph_loop
OLD = [box.type for box in oldtrace.inputargs]
NEW = [box.type for box in newtrace.inputargs]
assert OLD == NEW
assert not hasattr(oldlooptoken, '_llgraph_redirected')
oldlooptoken.compiled_loop_token._llgraph_redirected = True
oldlooptoken.compiled_loop_token._llgraph_loop = newtrace
alltraces = newlooptoken.compiled_loop_token._llgraph_alltraces
oldlooptoken.compiled_loop_token._llgraph_alltraces = alltraces
def free_loop_and_bridges(self, compiled_loop_token):
for c in compiled_loop_token._llgraph_alltraces:
c.has_been_freed = True
compiled_loop_token._llgraph_alltraces = []
compiled_loop_token._llgraph_loop = None
model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token)
def make_execute_token(self, *argtypes):
return self._execute_token
def _execute_token(self, loop_token, *args):
lltrace = loop_token.compiled_loop_token._llgraph_loop
frame = LLFrame(self, lltrace.inputargs, args)
try:
frame.execute(lltrace)
assert False
except ExecutionFinished, e:
return e.deadframe
def get_int_value(self, deadframe, index):
v = deadframe._values[index]
assert lltype.typeOf(v) == lltype.Signed
return v
def get_ref_value(self, deadframe, index):
v = deadframe._values[index]
assert lltype.typeOf(v) == llmemory.GCREF
return v
def get_float_value(self, deadframe, index):
v = deadframe._values[index]
assert lltype.typeOf(v) == longlong.FLOATSTORAGE
return v
def get_latest_descr(self, deadframe):
return deadframe._latest_descr
def grab_exc_value(self, deadframe):
if deadframe._last_exception is not None:
result = deadframe._last_exception.args[1]
gcref = lltype.cast_opaque_ptr(llmemory.GCREF, result)
else:
gcref = lltype.nullptr(llmemory.GCREF.TO)
return gcref
def force(self, force_token):
frame = force_token
assert isinstance(frame, LLFrame)
assert frame.forced_deadframe is None
values = []
for box in frame.force_guard_op.getfailargs():
if box is not None:
if box is not frame.current_op.result:
value = frame.env[box]
else:
value = box.value # 0 or 0.0 or NULL
else:
value = None
values.append(value)
frame.forced_deadframe = LLDeadFrame(
_getdescr(frame.force_guard_op), values)
return frame.forced_deadframe
def set_savedata_ref(self, deadframe, data):
deadframe._saved_data = data
def get_savedata_ref(self, deadframe):
assert deadframe._saved_data is not None
return deadframe._saved_data
# ------------------------------------------------------------
def calldescrof(self, FUNC, ARGS, RESULT, effect_info):
key = ('call', getkind(RESULT),
tuple([getkind(A) for A in ARGS]),
effect_info)
try:
return self.descrs[key]
except KeyError:
descr = CallDescr(RESULT, ARGS, effect_info)
self.descrs[key] = descr
return descr
def sizeof(self, S):
key = ('size', S)
try:
return self.descrs[key]
except KeyError:
descr = SizeDescr(S)
self.descrs[key] = descr
return descr
def fielddescrof(self, S, fieldname):
key = ('field', S, fieldname)
try:
return self.descrs[key]
except KeyError:
descr = FieldDescr(S, fieldname)
self.descrs[key] = descr
if self.vinfo_for_tests is not None:
descr.vinfo = self.vinfo_for_tests
return descr
def arraydescrof(self, A):
key = ('array', A)
try:
return self.descrs[key]
except KeyError:
descr = ArrayDescr(A)
self.descrs[key] = descr
return descr
def interiorfielddescrof(self, A, fieldname):
key = ('interiorfield', A, fieldname)
try:
return self.descrs[key]
except KeyError:
descr = InteriorFieldDescr(A, fieldname)
self.descrs[key] = descr
return descr
def _calldescr_dynamic_for_tests(self, atypes, rtype,
abiname='FFI_DEFAULT_ABI'):
# XXX WTF is this, and why does it break all abstractions?
from rpython.jit.backend.llsupport import ffisupport
return ffisupport.calldescr_dynamic_for_tests(self, atypes, rtype,
abiname)
def calldescrof_dynamic(self, cif_description, extrainfo):
# XXX WTF, this is happy nonsense
from rpython.jit.backend.llsupport.ffisupport import get_ffi_type_kind
from rpython.jit.backend.llsupport.ffisupport import UnsupportedKind
ARGS = []
try:
for itp in range(cif_description.nargs):
arg = cif_description.atypes[itp]
kind = get_ffi_type_kind(self, arg)
if kind != VOID:
ARGS.append(support.kind2TYPE[kind[0]])
RESULT = support.kind2TYPE[get_ffi_type_kind(self, cif_description.rtype)[0]]
except UnsupportedKind:
return None
key = ('call_dynamic', RESULT, tuple(ARGS),
extrainfo, cif_description.abi)
try:
return self.descrs[key]
except KeyError:
descr = CallDescr(RESULT, ARGS, extrainfo, ABI=cif_description.abi)
self.descrs[key] = descr
return descr
# ------------------------------------------------------------
def maybe_on_top_of_llinterp(self, func, args, RESULT):
ptr = llmemory.cast_int_to_adr(func).ptr
if hasattr(ptr._obj, 'graph'):
res = self.llinterp.eval_graph(ptr._obj.graph, args)
else:
res = ptr._obj._callable(*args)
if RESULT is lltype.Void:
return None
return support.cast_result(RESULT, res)
def _do_call(self, func, args_i, args_r, args_f, calldescr):
TP = llmemory.cast_int_to_adr(func).ptr._obj._TYPE
args = support.cast_call_args(TP.ARGS, args_i, args_r, args_f)
return self.maybe_on_top_of_llinterp(func, args, TP.RESULT)
bh_call_i = _do_call
bh_call_r = _do_call
bh_call_f = _do_call
bh_call_v = _do_call
def bh_getfield_gc(self, p, descr):
p = support.cast_arg(lltype.Ptr(descr.S), p)
return support.cast_result(descr.FIELD, getattr(p, descr.fieldname))
bh_getfield_gc_pure = bh_getfield_gc
bh_getfield_gc_i = bh_getfield_gc
bh_getfield_gc_r = bh_getfield_gc
bh_getfield_gc_f = bh_getfield_gc
bh_getfield_raw = bh_getfield_gc
bh_getfield_raw_pure = bh_getfield_raw
bh_getfield_raw_i = bh_getfield_raw
bh_getfield_raw_r = bh_getfield_raw
bh_getfield_raw_f = bh_getfield_raw
def bh_setfield_gc(self, p, newvalue, descr):
p = support.cast_arg(lltype.Ptr(descr.S), p)
setattr(p, descr.fieldname, support.cast_arg(descr.FIELD, newvalue))
bh_setfield_gc_i = bh_setfield_gc
bh_setfield_gc_r = bh_setfield_gc
bh_setfield_gc_f = bh_setfield_gc
bh_setfield_raw = bh_setfield_gc
bh_setfield_raw_i = bh_setfield_raw
bh_setfield_raw_f = bh_setfield_raw
def bh_arraylen_gc(self, a, descr):
array = a._obj.container
if descr.A is not descr.OUTERA:
array = getattr(array, descr.OUTERA._arrayfld)
return array.getlength()
def bh_getarrayitem_gc(self, a, index, descr):
a = support.cast_arg(lltype.Ptr(descr.A), a)
array = a._obj
return support.cast_result(descr.A.OF, array.getitem(index))
bh_getarrayitem_gc_pure = bh_getarrayitem_gc
bh_getarrayitem_gc_i = bh_getarrayitem_gc
bh_getarrayitem_gc_r = bh_getarrayitem_gc
# Source: yiannist/pkg-ganeti
#
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Cluster related commands"""
# pylint: disable=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-cluster
from cStringIO import StringIO
import os
import time
import OpenSSL
import tempfile
import itertools
from ganeti.cli import *
from ganeti import bootstrap
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import qlang
from ganeti import serializer
from ganeti import ssconf
from ganeti import ssh
from ganeti import uidpool
from ganeti import utils
from ganeti.client import base
ON_OPT = cli_option("--on", default=False,
action="store_true", dest="on",
help="Recover from an EPO")
GROUPS_OPT = cli_option("--groups", default=False,
action="store_true", dest="groups",
help="Arguments are node groups instead of nodes")
FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
help="Override interactive check for --no-voting",
default=False, action="store_true")
FORCE_DISTRIBUTION = cli_option("--yes-do-it", dest="yes_do_it",
help="Unconditionally distribute the"
" configuration, even if the queue"
" is drained",
default=False, action="store_true")
TO_OPT = cli_option("--to", default=None, type="string",
help="The Ganeti version to upgrade to")
RESUME_OPT = cli_option("--resume", default=False, action="store_true",
help="Resume any pending Ganeti upgrades")
_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
def _InitEnabledDiskTemplates(opts):
"""Initialize the list of enabled disk templates.
"""
if opts.enabled_disk_templates:
return opts.enabled_disk_templates.split(",")
else:
return constants.DEFAULT_ENABLED_DISK_TEMPLATES
def _InitVgName(opts, enabled_disk_templates):
"""Initialize the volume group name.
@type enabled_disk_templates: list of strings
@param enabled_disk_templates: cluster-wide enabled disk templates
"""
vg_name = None
if opts.vg_name is not None:
vg_name = opts.vg_name
if vg_name:
if not utils.IsLvmEnabled(enabled_disk_templates):
ToStdout("You specified a volume group with --vg-name, but you did not"
" enable any disk template that uses lvm.")
elif utils.IsLvmEnabled(enabled_disk_templates):
raise errors.OpPrereqError(
"LVM disk templates are enabled, but vg name not set.")
elif utils.IsLvmEnabled(enabled_disk_templates):
vg_name = constants.DEFAULT_VG
return vg_name
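# Illustrative behaviour of _InitVgName (assumed option values, not from the source):
#   opts.vg_name = "xenvg"                      -> returns "xenvg" (warns if no LVM-based template is enabled)
#   opts.vg_name = None, LVM template enabled   -> returns constants.DEFAULT_VG
#   opts.vg_name = None, LVM templates disabled -> returns None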
def _InitDrbdHelper(opts, enabled_disk_templates):
"""Initialize the DRBD usermode helper.
"""
drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
if not drbd_enabled and opts.drbd_helper is not None:
ToStdout("Note: You specified a DRBD usermode helper, while DRBD storage"
" is not enabled.")
if drbd_enabled:
if opts.drbd_helper is None:
return constants.DEFAULT_DRBD_HELPER
if opts.drbd_helper == '':
raise errors.OpPrereqError(
"Unsetting the drbd usermode helper while enabling DRBD is not"
" allowed.")
return opts.drbd_helper
@UsesRPC
def InitCluster(opts, args):
"""Initialize the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the desired
cluster name
@rtype: int
@return: the desired exit code
"""
enabled_disk_templates = _InitEnabledDiskTemplates(opts)
try:
vg_name = _InitVgName(opts, enabled_disk_templates)
drbd_helper = _InitDrbdHelper(opts, enabled_disk_templates)
except errors.OpPrereqError, e:
ToStderr(str(e))
return 1
master_netdev = opts.master_netdev
if master_netdev is None:
nic_mode = opts.nicparams.get(constants.NIC_MODE, None)
if not nic_mode:
# default case, use bridging
master_netdev = constants.DEFAULT_BRIDGE
elif nic_mode == constants.NIC_MODE_OVS:
# default ovs is different from default bridge
master_netdev = constants.DEFAULT_OVS
opts.nicparams[constants.NIC_LINK] = constants.DEFAULT_OVS
hvlist = opts.enabled_hypervisors
if hvlist is None:
hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
hvlist = hvlist.split(",")
hvparams = dict(opts.hvparams)
beparams = opts.beparams
nicparams = opts.nicparams
diskparams = dict(opts.diskparams)
# check the disk template types here, as we cannot rely on the type check done
# by the opcode parameter types
diskparams_keys = set(diskparams.keys())
if not (diskparams_keys <= constants.DISK_TEMPLATES):
unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
return 1
# prepare beparams dict
beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
# prepare nicparams dict
nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
# prepare ndparams dict
if opts.ndparams is None:
ndparams = dict(constants.NDC_DEFAULTS)
else:
ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
# prepare hvparams dict
for hv in constants.HYPER_TYPES:
if hv not in hvparams:
hvparams[hv] = {}
hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
# prepare diskparams dict
for templ in constants.DISK_TEMPLATES:
if templ not in diskparams:
diskparams[templ] = {}
diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
diskparams[templ])
utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
# prepare ipolicy dict
ipolicy = CreateIPolicyFromOpts(
ispecs_mem_size=opts.ispecs_mem_size,
ispecs_cpu_count=opts.ispecs_cpu_count,
ispecs_disk_count=opts.ispecs_disk_count,
ispecs_disk_size=opts.ispecs_disk_size,
ispecs_nic_count=opts.ispecs_nic_count,
minmax_ispecs=opts.ipolicy_bounds_specs,
std_ispecs=opts.ipolicy_std_specs,
ipolicy_disk_templates=opts.ipolicy_disk_templates,
ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
fill_all=True)
if opts.candidate_pool_size is None:
opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
if opts.mac_prefix is None:
opts.mac_prefix = constants.DEFAULT_MAC_PREFIX
uid_pool = opts.uid_pool
if uid_pool is not None:
uid_pool = uidpool.ParseUidPool(uid_pool)
if opts.prealloc_wipe_disks is None:
opts.prealloc_wipe_disks = False
external_ip_setup_script = opts.use_external_mip_script
if external_ip_setup_script is None:
external_ip_setup_script = False
try:
primary_ip_version = int(opts.primary_ip_version)
except (ValueError, TypeError), err:
ToStderr("Invalid primary ip version value: %s" % str(err))
return 1
master_netmask = opts.master_netmask
try:
if master_netmask is not None:
master_netmask = int(master_netmask)
except (ValueError, TypeError), err:
ToStderr("Invalid master netmask value: %s" % str(err))
return 1
if opts.disk_state:
disk_state = utils.FlatToDict(opts.disk_state)
else:
disk_state = {}
hv_state = dict(opts.hv_state)
if opts.install_image:
install_image = opts.install_image
else:
install_image = ""
if opts.zeroing_image:
zeroing_image = opts.zeroing_image
else:
zeroing_image = ""
compression_tools = _GetCompressionTools(opts)
default_ialloc_params = opts.default_iallocator_params
if opts.enabled_user_shutdown:
enabled_user_shutdown = True
else:
enabled_user_shutdown = False
bootstrap.InitCluster(cluster_name=args[0],
secondary_ip=opts.secondary_ip,
vg_name=vg_name,
mac_prefix=opts.mac_prefix,
master_netmask=master_netmask,
master_netdev=master_netdev,
file_storage_dir=opts.file_storage_dir,
shared_file_storage_dir=opts.shared_file_storage_dir,
gluster_storage_dir=opts.gluster_storage_dir,
enabled_hypervisors=hvlist,
hvparams=hvparams,
beparams=beparams,
nicparams=nicparams,
ndparams=ndparams,
diskparams=diskparams,
ipolicy=ipolicy,
candidate_pool_size=opts.candidate_pool_size,
modify_etc_hosts=opts.modify_etc_hosts,
modify_ssh_setup=opts.modify_ssh_setup,
maintain_node_health=opts.maintain_node_health,
drbd_helper=drbd_helper,
uid_pool=uid_pool,
default_iallocator=opts.default_iallocator,
default_iallocator_params=default_ialloc_params,
primary_ip_version=primary_ip_version,
prealloc_wipe_disks=opts.prealloc_wipe_disks,
use_external_mip_script=external_ip_setup_script,
hv_state=hv_state,
disk_state=disk_state,
enabled_disk_templates=enabled_disk_templates,
install_image=install_image,
zeroing_image=zeroing_image,
compression_tools=compression_tools,
enabled_user_shutdown=enabled_user_shutdown,
)
op = opcodes.OpClusterPostInit()
SubmitOpCode(op, opts=opts)
return 0
@UsesRPC
def DestroyCluster(opts, args):
"""Destroy the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should be an empty list
@rtype: int
@return: the desired exit code
"""
if not opts.yes_do_it:
ToStderr("Destroying a cluster is irreversible. If you really want"
" destroy this cluster, supply the --yes-do-it option.")
return 1
op = opcodes.OpClusterDestroy()
master_uuid = SubmitOpCode(op, opts=opts)
# if we reached this, the opcode didn't fail; we can proceed to
# shut down all the daemons
bootstrap.FinalizeClusterDestroy(master_uuid)
return 0
def RenameCluster(opts, args):
"""Rename the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the new cluster name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
(cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
new_name = args[0]
if not opts.force:
usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
" connected over the network to the cluster name, the"
" operation is very dangerous as the IP address will be"
" removed from the node and the change may not go through."
" Continue?") % (cluster_name, new_name)
if not AskUser(usertext):
return 1
op = opcodes.OpClusterRename(name=new_name)
result = SubmitOpCode(op, opts=opts, cl=cl)
if result:
ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)
return 0
def ActivateMasterIp(opts, args):
"""Activates the master IP.
"""
op = opcodes.OpClusterActivateMasterIp()
SubmitOpCode(op)
return 0
def DeactivateMasterIp(opts, args):
"""Deactivates the master IP.
"""
if not opts.confirm:
usertext = ("This will disable the master IP. All the open connections to"
" the master IP will be closed. To reach the master you will"
" need to use its node IP."
" Continue?")
if not AskUser(usertext):
return 1
op = opcodes.OpClusterDeactivateMasterIp()
SubmitOpCode(op)
return 0
def RedistributeConfig(opts, args):
"""Forces push of the cluster configuration.
@param opts: the command line options selected by the user
@type args: list
@param args: empty list
@rtype: int
@return: the desired exit code
"""
op = opcodes.OpClusterRedistConf()
if
# Source: vishalbelsare/pydmrs
from pydmrs.pydelphin_interface import parse, generate
from pydmrs.mapping.mapping import dmrs_mapping
from pydmrs.graphlang.graphlang import parse_graphlang
import examples.examples_dmrs as examples
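# This example script round-trips sentences through parse -> dmrs_mapping -> generate:
# each block builds a search pattern and a replacement pattern with parse_graphlang
# (e.g. '[1]:_the_q' matches a node carrying the _the_q predicate), applies the
# mapping to a parsed DMRS, and asserts that the expected paraphrase is among the
# regenerated sentences.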
if __name__ == '__main__':
# basic functionality
dmrs = examples.the_dog_chases_the_cat()
search_dmrs = parse_graphlang('[1]:_the_q')
replace_dmrs = parse_graphlang('[1]:_a_q')
# iterative, all
assert 'A dog chases a cat.' in generate(dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=True, all_matches=True))
# not iterative, all
assert all(sent in sents for sent, sents in zip(['A dog chases the cat.', 'The dog chases a cat.'], [generate(dmrs) for dmrs in dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=False, all_matches=True)]))
# iterative, not all
assert 'A dog chases the cat.' in generate(dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=True, all_matches=False))
# not iterative, not all
assert 'A dog chases the cat.' in generate(dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=False, all_matches=False))
# original dmrs did not change so far
assert 'The dog chases the cat.' in generate(dmrs)
# iterative, not all
dmrs = examples.the_dog_chases_the_cat()
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False, iterative=True, all_matches=False)
assert 'A dog chases the cat.' in generate(dmrs)
# iterative, all
dmrs = examples.the_dog_chases_the_cat()
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False, iterative=True, all_matches=True)
assert 'A dog chases a cat.' in generate(dmrs)
dmrs = parse('Kim eats and Kim sleeps.')[0]
search_dmrs = parse_graphlang('[4]:node=1 <-1- [2]:node <-l- [1]:_and_c e? -r-> [3]:node -1-> node=1 <-- proper_q; :2 <-lh- :1 -rh-> :3')
replace_dmrs = parse_graphlang('[4]:node <-1- [2]:node <-l- [1]:_and_c e? -r-> [3]:node -1-> :4; :2 <=lh= :1 =rh=> :3')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'Kim eats and sleeps.' in generate(dmrs)
# some examples inspired by examples from the AMR specification
dmrs = parse('He described the mission as a failure.')[0]
search_dmrs = parse_graphlang('[2]:node <-2- *[1]:_describe_v_as e? -3-> [3]:node')
replace_dmrs = parse_graphlang('pronoun_q --> pron x[3sn_s] <-2- [1]:_describe_v_to e? <-2h- *_as_x_subord e[pui--] -1h-> _be_v_id e[ppi--] -1-> [2]:node; :_be_v_id -2-> [3]:node')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'As he described it, the mission is a failure.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'He described the mission as a failure.' in generate(dmrs)
dmrs = parse('The boy can go.')[0]
search_dmrs = parse_graphlang('[1]:_can_v_modal e[p????] -1h-> [2]:_v e[pui--]')
replace_dmrs = parse_graphlang('[1]:_possible_a_for e[o????] -1h-> [2]:_v e[ppi--]')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'It is possible that the boy goes.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy can go.' in generate(dmrs)
dmrs = parse('The boy can\'t go.')[0]
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'It is not possible that the boy goes.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy can\'t go.' in generate(dmrs)
dmrs = parse('The boy must go.')[0]
search_dmrs = parse_graphlang('[1]:_must_v_modal e? -1h-> [2]:_v e[pui--]')
replace_dmrs = parse_graphlang('[1]:_necessary_a_for e? -1h-> [2]:_v e[ppi--]')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'It is necessary that the boy goes.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy must go.' in generate(dmrs)
dmrs = parse('The boy should go.')[0]
search_dmrs = parse_graphlang('[1]:_should_v_modal e? -1h-> [2]:_v e[pui--]')
replace_dmrs = parse_graphlang('[1]:_recommend_v_to e? -2h-> [2]:_v e[ppi--]')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'That the boy goes, is recommended.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy should go.' in generate(dmrs)
dmrs = parse('The boy is likely to go.')[0]
search_dmrs = parse_graphlang('[1]:_likely_a_1 e? -1h-> [2]:_v e[oui--]')
replace_dmrs = parse_graphlang('[1]:_likely_a_1 e? -1h-> [2]:_v e[ppi--]')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'It is likely that the boy goes.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy is likely to go.' in generate(dmrs)
dmrs = parse('The boy would rather go.')[0]
search_dmrs = parse_graphlang('[1]:_would_v_modal e? -1h-> [2]:_v e? <=1= _rather_a_1 i; :2 -1-> [3]:node')
replace_dmrs = parse_graphlang('[1]:_prefer_v_to e? -2h-> [2]:_v e? -1-> [3]:node <-1- :1')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'The boy prefers to go.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy would rather go.' in generate(dmrs)
dmrs = parse('I don\'t have any money.')[0]
search_dmrs = parse_graphlang('neg e[pui--] -1h-> [1]:_v e? -2-> [2]:node <-- _any_q')
replace_dmrs = parse_graphlang('[1]:_v e? -2-> [2]:node <-- _no_q')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'I have no money.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'I don\'t have any money.' in generate(dmrs)
dmrs = parse('Kim doesn\'t like any cake.')[0]
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'Kim likes no cake.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'Kim doesn\'t like any cake.' in generate(dmrs)
dmrs = parse('The boy doesn\'t think his team will win.')[0]
search_dmrs = parse_graphlang('neg e[pui--] -1h-> [1]:_v e? -2h-> [2]:_v e?')
replace_dmrs = parse_graphlang('[1]:_v e? -2h-> neg e[pui--] -1h-> [2]:_v e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'The boy thinks his team won\'t win.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'The boy doesn\'t think his team will win.' in generate(dmrs)
dmrs = parse('I don\'t believe that Kim likes cake.')[0]
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'I believe that Kim doesn\'t like cake.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'I don\'t believe that Kim likes cake.' in generate(dmrs)
dmrs = parse('I don\'t think that Kim doesn\'t like cake.')[0]
search_dmrs = parse_graphlang('neg e[pui--] -1h-> [1]:_v e? -2h-> neg e[pui--] -1h-> [2]:_v e?')
replace_dmrs = parse_graphlang('[1]:_v e? -2h-> [2]:_v e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'I think that Kim likes cake.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'I don\'t think that Kim doesn\'t like cake.' in generate(dmrs)
# Verb particle examples
dmrs = parse('I look you up.')[0]
search_dmrs = parse_graphlang('[1]:_look_v_up e?')
replace_dmrs = parse_graphlang('[1]:_find_v_1 e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'I find you.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'I look you up.' in generate(dmrs)
dmrs = parse('Kim carries on eating cake.')[0]
search_dmrs = parse_graphlang('[1]:_carry_v_on e?')
replace_dmrs = parse_graphlang('[1]:_continue_v_2 e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'Kim continues eating cake.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'Kim carries on eating cake.' in generate(dmrs)
dmrs = parse('Alice passed a message on to Bob.')[0]
search_dmrs = parse_graphlang('[1]:_pass_v_on e?')
replace_dmrs = parse_graphlang('[1]:_give_v_1 e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'Alice gave a message to Bob.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'Alice passed a message on to Bob.' in generate(dmrs)
dmrs = parse('Bob then gave Alice back the message.')[0]
search_dmrs = parse_graphlang('[1]:node <-2- [2]:_give_v_back e? -3-> [3]:node')
replace_dmrs = parse_graphlang('[3]:node <-2- [2]:_return_v_to e? -3-> [1]:node')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'Bob then returned the message to Alice.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'Bob then gave Alice back the message.' in generate(dmrs)
dmrs = parse('He keeps on complaining.')[0]
search_dmrs = parse_graphlang('[2]:node <-1- [1]:_keep_v_on e? -2h-> [3]:_v e[pui-+] -1-> :2')
replace_dmrs = parse_graphlang('[1]:_continue_v_2 e? -1h-> [3]:_v e[oui--] -1-> [2]:node')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'He continues to complain.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'He keeps on complaining.' in generate(dmrs)
dmrs = parse('He takes on great responsibility.')[0]
search_dmrs = parse_graphlang('[1]:_take_v_on e?')
replace_dmrs = parse_graphlang('[1]:_accept_v_1 e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'He accepts great responsibility.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'He takes on great responsibility.' in generate(dmrs)
# determinerless PPs
dmrs = parse('I found you at last.')[0]
search_dmrs = parse_graphlang('[1]:_at_p e[pui--] -2-> _last_n_1 x[3s_+_] <-- idiom_q_i')
replace_dmrs = parse_graphlang('[1]:_final_a_1 e[pui--]')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'I found you finally.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'I found you at last.' in generate(dmrs)
dmrs = parse('I am on edge.')[0]
search_dmrs = parse_graphlang('[1]:_on_p e? -2-> _edge_n_of x[3s_+_] <-- idiom_q_i')
replace_dmrs = parse_graphlang('[1]:_nervous_a_about e?')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'I am nervous.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'I am on edge.' in generate(dmrs)
dmrs = parse('You can see the insects at close range.')[0]
search_dmrs = parse_graphlang('[1]:_at_p e[pui--] -2-> _range_n_of x[3s___] <-- udef_q; :_range_n_of <=1= _close_a_to e[p____]')
replace_dmrs = parse_graphlang('[1]:_from_p_state e[pui--] -2-> _distance_n_1 x[3s_+_] <-- _a_q; :_distance_n_1 <=1= _small_a_1 e[p____]')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'You can see the insects from a small distance.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'You can see the insects at close range.' in generate(dmrs)
# idioms
dmrs = parse('Kim often took advantage of Sandy.')[0]
search_dmrs = parse_graphlang('[2]:node <-3- [1]:_take_v_of-i e? -2-> _advantage_n_i x[3s_+_] <-- idiom_q_i')
replace_dmrs = parse_graphlang('[1]:_benefit_v_from e? -2-> [2]:node')
dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
assert 'Kim often benefitted from Sandy.' in generate(dmrs)
dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
assert 'Kim often took advantage of Sandy.' in generate(dmrs)
dmrs = parse('The government keeps tabs on everyone.')[0]
search_dmrs = parse_graphlang('[2]:node <-3- | |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import print_function, division, absolute_import
import os.path
import sys
dependencyDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../Dep")
sys.path.insert(0, dependencyDir)
import re
from time import mktime
from datetime import datetime
from fontTools.misc.timeTools import epoch_diff
from fontTools.ttLib import newTable
from fontTools.ttLib.tables.ttProgram import Program
from otRebuilder.Lib import Builders
from otRebuilder.Lib import Constants
from otRebuilder.Lib import Workers
from otRebuilder.Lib import Fixer
class Rebuilder(Workers.Worker):
def __init__(self, ttfontObj, jobsObj, configDict):
super(Rebuilder, self).__init__(ttfontObj, jobsObj)
self.config = configDict # None if -c is not specified
def rebuildGasp(self):
gasp = newTable("gasp")
gasp.version = 1
gasp.gaspRange = {65535: 10}
self.font["gasp"] = gasp
return
# Create global instruction table with basic rendering settings
def rebuildPrep(self):
hintProg = Program()
hintProg.fromBytecode([184, 1, 255, 133, 184, 0, 4, 141])
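# Assumed decoding of the bytecode above (TrueType opcodes):
#   B8 01FF 85  -> PUSHW[0] 511, SCANCTRL  (enable dropout control at all sizes)
#   B8 0004 8D  -> PUSHW[0] 4,   SCANTYPE  (smart dropout control, including stubs)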
prep = newTable("prep")
prep.program = hintProg
self.font["prep"] = prep
return
def rebuildDSIG(self):
DSIG = newTable("DSIG")
DSIG.ulVersion = 1
DSIG.usNumSigs = 0
DSIG.usFlag = 0
DSIG.signatureRecords = []
self.font["DSIG"] = DSIG
return
def rebuildCmap(self):
cmap = self.font.get("cmap")
cmap.tableVersion = 0
# sourceSub = [winBMP, winFull, macRoman, macBMP, macFull, macLastResort]
sourceSub = [None for i in range(6)]
unsupported = [] # Unsupported Unicode subtables, like winSymbol, macUVS, etc.
newSub = []
newMacRoman = None
for subtable in cmap.tables:
if subtable.isUnicode():
if Workers.CmapWorker.isLastResort(subtable):
sourceSub[5] = subtable # macLastResort
elif subtable.format == 12: # Full repertoire
if subtable.platformID == 3:
sourceSub[1] = subtable # winFull
elif subtable.platformID == 0:
sourceSub[4] = subtable # macFull
else:
pass
elif subtable.format in [4, 6]: # BMP
if subtable.platformID == 3:
sourceSub[0] = subtable # winBMP
elif subtable.platformID == 0:
sourceSub[3] = subtable # macBMP
else:
pass
else:
unsupported.append(subtable)
elif Workers.CmapWorker.isMacRoman(subtable):
sourceSub[2] = subtable # macRoman
else:
continue
# Build all from top to bottom.
# Priority ranking: macLastResort > winFull > macFull > winBMP > macBMP > macRoman
if sourceSub[5]: # macLastResort, 4 subtables in total
newSub.extend(Workers.CmapWorker.subtables_buildfmt13sFromLastResort(sourceSub[5]))
newMacRoman = Workers.CmapWorker.subtable_buildMacRomanFromUnicode(sourceSub[5])
elif sourceSub[1]: # winFull, 5 subtables in total
newSub.extend(Workers.CmapWorker.subtables_buildUnicodeAllFromFull(sourceSub[1]))
newMacRoman = Workers.CmapWorker.subtable_buildMacRomanFromUnicode(sourceSub[1])
elif sourceSub[4]: # macFull, 5 subtables in total
newSub.extend(Workers.CmapWorker.subtables_buildUnicodeAllFromFull(sourceSub[4]))
newMacRoman = Workers.CmapWorker.subtable_buildMacRomanFromUnicode(sourceSub[4])
elif sourceSub[0]: # winBMP, 3 subtables in total
newSub.extend(Workers.CmapWorker.subtables_buildFmt4sFromBMP(sourceSub[0]))
newMacRoman = Workers.CmapWorker.subtable_buildMacRomanFromUnicode(sourceSub[0])
elif sourceSub[3]: # macBMP, 3 subtables in total
newSub.extend(Workers.CmapWorker.subtables_buildFmt4sFromBMP(sourceSub[3]))
newMacRoman = Workers.CmapWorker.subtable_buildMacRomanFromUnicode(sourceSub[3])
elif sourceSub[2]: # macRoman, 3 subtables in total
newSub.extend(Workers.CmapWorker.subtables_buildFmt4sFromMacRoman(sourceSub[2]))
newMacRoman = sourceSub[2]
else:
pass
# **Mac Office 2011** sometimes lets the macRoman subtable override all the others.
# When rebuilding for Mac Office, we therefore leave macRoman out.
if newMacRoman and not self.jobs.rebuild_macOffice:
newSub.append(newMacRoman)
# Add unsupported subtables.
newSub.extend(unsupported)
# Apply changes.
cmap.tables = newSub
# Update consistencies between `cmap` and `name` if configDict is not specified.
if self.jobs.fix_name:
fixer = Fixer.Fixer(self.font, self.jobs)
fixer.fixName()
del fixer
return
def rebuildByConfig(self):
if self.config is None:
return
self.__updateCFF()
self.__updateHead()
self.__updateHhea()
self.__updateVhea()
self.__updatePost()
self.__updateOS2f2()
self.__rebuildName()
return
# This method must be called first so that rebuildName() can update its psName property later.
def __updateCFF(self):
cffT = self.font.get("CFF ")
if not cffT:
return
general = self.config.get("General")
style = self.config.get("Style")
name = self.config.get("Name")
builder = Builders.cffTopDictBuilder()
if general: # Version priority: `head`.fontRevision < *specified*
builder.setVersion(self.font["head"].fontRevision)
builder.setVersion(general.get("version"))
builder.setROS(general.get("cidRegistry"),
general.get("cidOrdering"),
general.get("cidSupplement")
)
if style:
styleLink = style.get("styleLink")
weightScale = style.get("weightScale")
if styleLink in range(0, 5):
if styleLink == 2:
builder.setItalicAngle(0.0)
builder.setWeight(Constants.STANDARD_WEIGHTS[6])
elif styleLink == 3:
builder.setItalicAngle(Constants.DEFAULT_ITALIC_ANGLE)
builder.setWeight(Constants.STANDARD_WEIGHTS[3])
elif styleLink == 4:
builder.setItalicAngle(Constants.DEFAULT_ITALIC_ANGLE)
builder.setWeight(Constants.STANDARD_WEIGHTS[6])
else:
builder.setItalicAngle(0.0)
builder.setWeight(Constants.STANDARD_WEIGHTS[3])
if weightScale in range(1, 11):
builder.setWeight(Constants.STANDARD_WEIGHTS[weightScale - 1])
builder.setMonospaced(style.get("isMonospaced"))
builder.setItalicAngle(style.get("italicAngle"))
builder.setUnderlinePosition(style.get("underlinePosition"))
builder.setUnderlineThickness(style.get("underlineThickness"))
if name and name.get("en"):
family = self.__loadUstr(name["en"].get("fontFamily"))
subfamily = self.__loadUstr(name["en"].get("fontSubfamily"))
fullName = self.__loadUstr(name["en"].get("fontFullName"))
if family:
builder.clearCFFnameMenu(cffT)
else: # No family, no update
builder.applyToCFFtable(cffT)
return
if not fullName and (family and subfamily):
fullName = family + u" " + subfamily
builder.setFamily(family)
builder.setFullName(fullName)
builder.setPostScriptName(name["en"].get("postScriptName"))
builder.setCopyright(name["en"].get("copyright"))
builder.setTrademark(name["en"].get("trademark"))
builder.applyToCFFtable(cffT)
return
def __updateHead(self):
headT = self.font.get("head")
if not headT:
return
general = self.config.get("General")
style = self.config.get("Style")
if general:
version = general.get("version")
createdTime = general.get("createdTime")
modifiedTime = general.get("modifiedTime")
if isinstance(version, float) or isinstance(version, int):
headT.fontRevision = float(abs(version))
if isinstance(createdTime, datetime):
headT.created = long(mktime(datetime.timetuple(createdTime)) - epoch_diff)
if isinstance(modifiedTime, datetime):
headT.modified = long(mktime(datetime.timetuple(modifiedTime)) - epoch_diff)
self.font.recalcTimestamp = False
if style:
styleLink = style.get("styleLink")
widthScale = style.get("widthScale")
if styleLink in range(0, 5):
# Clear related bits first
headT.macStyle &= ~0b11
if styleLink == Constants.STYLELINK_BOLD:
headT.macStyle |= 1
elif styleLink == Constants.STYLELINK_ITALIC:
headT.macStyle |= 1<<1
elif styleLink == Constants.STYLELINK_BOLDITALIC:
headT.macStyle |= 1
headT.macStyle |= 1<<1
else:
pass
if widthScale in range(1, 10):
headT.macStyle &= ~(0b11<<5)
if widthScale < 5:
headT.macStyle |= 1<<5
elif widthScale > 5:
headT.macStyle |= 1<<6
else:
pass
return
def __updateHhea(self):
hheaT = self.font.get("hhea")
if not hheaT:
return
metrics = self.config.get("Metrics")
if metrics:
hheaAscender = metrics.get("hheaAscender")
hheaDescender = metrics.get("hheaDescender")
hheaLineGap = metrics.get("hheaLineGap")
if isinstance(hheaAscender, float) or isinstance(hheaAscender, int):
hheaT.ascent = int(hheaAscender)
if isinstance(hheaDescender, float) or isinstance(hheaDescender, int):
hheaT.descent = int(hheaDescender)
if isinstance(hheaLineGap, float) or isinstance(hheaLineGap, int):
hheaT.lineGap = int(hheaLineGap)
return
def __updateVhea(self):
vheaT = self.font.get("vhea")
if not vheaT:
return
metrics = self.config.get("Metrics")
if metrics:
vheaAscender = metrics.get("vheaAscender")
vheaDescender = metrics.get("vheaDescender")
vheaLineGap = metrics.get("vheaLineGap")
if isinstance(vheaAscender, float) or isinstance(vheaAscender, int):
vheaT.ascent = int(vheaAscender)
if isinstance(vheaDescender, float) or isinstance(vheaDescender, int):
vheaT.descent = int(vheaDescender)
if isinstance(vheaLineGap, float) or isinstance(vheaLineGap, int):
vheaT.lineGap = int(vheaLineGap)
return
def __updatePost(self):
postT = self.font.get("post")
if not postT:
return
style = self.config.get("Style")
if style:
isMonospaced = style.get("isMonospaced")
styleLink = style.get("styleLink")
italicAngle = style.get("italicAngle")
underlinePosition = style.get("underlinePosition")
underlineThickness = style.get("underlineThickness")
if isinstance(isMonospaced, bool):
if isMonospaced:
postT.isFixedPitch = 1
else:
postT.isFixedPitch = 0
if styleLink in range(0, 5):
if styleLink > 2:
postT.italicAngle = Constants.DEFAULT_ITALIC_ANGLE
else:
postT.italicAngle = 0.0
if isinstance(italicAngle, float) or isinstance(italicAngle, int):
postT.italicAngle = float(italicAngle)
if isinstance(underlinePosition, float) or isinstance(underlinePosition, int):
postT.underlinePosition = int(underlinePosition)
if isinstance(underlineThickness, float) or isinstance(underlineThickness, int):
postT.underlineThickness = int(underlineThickness)
return
def __updateOS2f2(self):
OS2f2T = self.font.get("OS/2")
if not OS2f2T:
return
self.__updateOS2f2_addNewAttrs()
general = self.config.get("General")
name = self.config.get("Name")
metrics = self.config.get("Metrics")
style = self.config.get("Style")
if general:
embeddingRestriction = general.get("embeddingRestriction")
activeCodepages = general.get("codepages")
if embeddingRestriction in range(0, 4):
if embeddingRestriction == Constants.EMBED_EDITABLE:
OS2f2T.fsType = 8
elif embeddingRestriction == Constants.EMBED_PREVIEW_AND_PRINT:
OS2f2T.fsType = 4
elif embeddingRestriction == Constants.EMBED_RESTRICTED:
OS2f2T.fsType = 2
else: # No Restriction
OS2f2T.fsType = 0
if isinstance(activeCodepages, list) and \
(OS2f2T.version > 0 or self.jobs.rebuild_allowUpgrade):
OS2f2T.ulCodePageRange1 = 0
OS2f2T.ulCodePageRange2 = 0
for codepage in activeCodepages:
if codepage in Constants.CHARSET_TO_CODEPAGE_RANGE_1:
OS2f2T.ulCodePageRange1 |= 1<<Constants.CHARSET_TO_CODEPAGE_RANGE_1[codepage]
elif codepage in Constants.CHARSET_TO_CODEPAGE_RANGE_2:
OS2f2T.ulCodePageRange2 |= 1<<Constants.CHARSET_TO_CODEPAGE_RANGE_2[codepage]
else:
continue
if OS2f2T.version < 1:
OS2f2T.version = 1
if name and name.get("en") and name["en"].get("distributorID"):
uArcID = re.sub(r"[^A-Za-z0-9]+", r"", name["en"]["distributorID"])
arcID = uArcID.encode("ascii")
arcID += " "
OS2f2T.achVendID = arcID[:4]
if metrics:
typoAscender = metrics.get("typoAscender")
typoDescender = metrics.get("typoDescender")
typoLineGap = metrics.get("typoLineGap")
winAscender = metrics.get("winAscender")
winDescender = metrics.get("winDescender")
if isinstance(typoAscender, float) or isinstance(typoAscender, int):
OS2f2T.sTypoAscender = int(typoAscender)
if isinstance(typoDescender, float) or isinstance(typoDescender, int):
OS2f2T.sTypoDescender = int(typoDescender)
if isinstance(typoLineGap, float) or isinstance(typoLineGap, int):
OS2f2T.sTypoLineGap = int(typoLineGap)
if isinstance(winAscender, float) or isinstance(winAscender, int):
OS2f2T.usWinAscent = abs(int(winAscender))
if isinstance(winDescender, float) or isinstance(winDescender, int):
OS2f2T.usWinDescent = abs(int(winDescender))
if style:
widthScale = style.get("widthScale")
weightScale = style.get("weightScale")
styleLink = style.get("styleLink")
useTypoMetrics = style.get("useTypoMetrics")
forcePreferredFamily = style.get("forcePreferredFamily")
isMonospaced = style.get("isMonospaced")
monoLatinWidth = style.get("monoLatinWidth")
ibmClass = style.get("IBM")
panose = style.get("PANOSE")
if widthScale in range(1, 10):
OS2f2T.usWidthClass = Constants.WIDTH_SCALES[widthScale - 1]
self.__updateOS2f2_width2Panose(widthScale, OS2f2T.panose)
if weightScale in range(1, 11):
# OS2f2T.usWeightClass = Constants.STANDARD_WEIGHT_SCALES[weightScale - 1]
OS2f2T.usWeightClass = Constants.WIN_SAFE_WEIGHT_SCALES[weightScale - 1]
if OS2f2T.panose.bFamilyType in [2, 3, 4]:
OS2f2T.panose.bWeight = weightScale + 1
OS2f2T.fsSelection &= ~0b1111110 # Clear regular, bold and legacy bits
# Set fsSelection to regular only when usWeightClass == 400 and fsSelection != italic.
if (weightScale == 4) and not (OS2f2T.fsSelection & 1):
OS2f2T.fsSelection |= 1<<6
# We don't worry about fsSelection == boldItalic.
elif weightScale > 6:
OS2f2T.fsSelection |= 1<<5
else:
pass
if styleLink in range(0, 5):
if styleLink == Constants.STYLELINK_REGULAR:
# Regular style-link doesn't have to set fsSelection to regular, such as
# Arial Nova Light (none) or Arial Nova Black (bold).
OS2f2T.fsSelection &= ~0b0011111
elif styleLink == Constants.STYLELINK_BOLD:
# But bold style-link does.
OS2f2T.fsSelection |= 1<<5
OS2f2T.fsSelection &= ~0b1011111
elif styleLink == Constants.STYLELINK_ITALIC:
# For italic style-link the fsSelection regular must be turned off.
OS2f2T.fsSelection |= 1
OS2f2T.fsSelection &= ~0b1011110
elif styleLink == Constants.STYLELINK_BOLDITALIC:
OS2f2T.fsSelection |= 1<<5
OS2f2T.fsSelection |= 1
OS2f2T.fsSelection &= ~0b1011110
else: # Constants.STYLELINK_NONE
pass
if isinstance(useTypoMetrics, bool) and \
(OS2f2T.version > 3 | |
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddressResource, or the result of cls(response)
:rtype: ~azure.mgmt.edgeorder.v2020_12_01_preview.models.AddressResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddressResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
accept = "application/json"
# Construct URL
url = self.get_address_by_name.metadata['url'] # type: ignore
path_format_arguments = {
'addressName': self._serialize.url("address_name", address_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AddressResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_address_by_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses/{addressName}'} # type: ignore
def _create_address_initial(
self,
address_name, # type: str
resource_group_name, # type: str
address_resource, # type: "_models.AddressResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.AddressResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AddressResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_address_initial.metadata['url'] # type: ignore
path_format_arguments = {
'addressName': self._serialize.url("address_name", address_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(address_resource, 'AddressResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AddressResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_address_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses/{addressName}'} # type: ignore
def begin_create_address(
self,
address_name, # type: str
resource_group_name, # type: str
address_resource, # type: "_models.AddressResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AddressResource"]
"""Creates a new address with the specified parameters. Existing address cannot be updated with
this API and should instead be updated with the Update address API.
:param address_name: The name of the address Resource within the specified resource group.
address names must be between 3 and 24 characters in length and use any alphanumeric and
underscore only.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param address_resource: Address details from request body.
:type address_resource: ~azure.mgmt.edgeorder.v2020_12_01_preview.models.AddressResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AddressResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.edgeorder.v2020_12_01_preview.models.AddressResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddressResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_address_initial(
address_name=address_name,
resource_group_name=resource_group_name,
address_resource=address_resource,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AddressResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'addressName': self._serialize.url("address_name", address_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses/{addressName}'} # type: ignore
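# Hypothetical usage sketch for the long-running operation above (the client object
# and the resource names are assumptions, not taken from this file):
#   poller = operations.begin_create_address(
#       address_name="contosoaddress", resource_group_name="contoso-rg",
#       address_resource=address_resource)
#   address = poller.result()  # blocks until the LRO completes and returns AddressResource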
def _delete_address_by_name_initial(
self,
address_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_address_by_name_initial.metadata['url'] # type: ignore
path_format_arguments = {
'addressName': self._serialize.url("address_name", address_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_address_by_name_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses/{addressName}'} # type: ignore
def begin_delete_address_by_name(
self,
address_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an address.
:param address_name: The name of the address Resource within the specified resource group.
Address names must be between 3 and 24 characters in length and may contain only alphanumeric
characters and underscores.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_address_by_name_initial(
address_name=address_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'addressName': self._serialize.url("address_name", address_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_address_by_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses/{addressName}'} # type: ignore
def _update_address_initial(
self,
address_name, # type: str
resource_group_name, # type: str
address_update_parameter, # type: "_models.AddressUpdateParameter"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.AddressResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AddressResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_address_initial.metadata['url'] # type: ignore
path_format_arguments = {
'addressName': self._serialize.url("address_name", address_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
| |
row in self.rows:
# Remove any double quotes from around the data before storing
self.qstn_metadata[row.descriptor] = row.value.strip('"')
# -------------------------------------------------------------------------
def get(self, value, default=None):
"""
This will return a single metadata value held by the widget
"""
if value in self.qstn_metadata:
return self.qstn_metadata[value]
else:
return default
# -------------------------------------------------------------------------
def set(self, value, data):
"""
This will store a single metadata value
"""
self.qstn_metadata[value] = data
# -------------------------------------------------------------------------
def getAnswer(self):
"""
Return the value of the answer for this question
"""
if "answer" in self.question:
answer = self.question.answer
else:
answer = ""
return answer
# -------------------------------------------------------------------------
def repr(self, value=None):
"""
function to format the answer, which can be passed in
"""
if value == None:
value = self.getAnswer()
return value
# -------------------------------------------------------------------------
def loadAnswer(self, complete_id, question_id, forceDB=False):
"""
This will return a value held by the widget
The value can be held in different locations
1) In the widget itself:
2) On the database: table.survey_complete
"""
value = None
self._store_metadata(question_id)
if "answer" in self.question and \
self.question.complete_id == complete_id and \
forceDB == False:
answer = self.question.answer
else:
query = (self.atable.complete_id == complete_id) & \
(self.atable.question_id == question_id)
row = current.db(query).select(limitby=(0, 1)).first()
if row != None:
value = row.value
self.question["answer"] = value
self.question["complete_id"] = complete_id
return value
# -------------------------------------------------------------------------
def initDisplay(self, **attr):
"""
This method sets up the variables that will be used by all
display methods of fields for the question type.
It uses the metadata to define the look of the field
"""
if "question_id" in attr:
self.id = attr["question_id"]
if self.id == None:
raise Exception("Need to specify the question_id for this QuestionType")
qstn_id = self.id
self._store_metadata(qstn_id)
attr["_name"] = self.question.code
self.attr = attr
# -------------------------------------------------------------------------
def display(self, **attr):
"""
This displays the widget on a web form. It uses the layout
function to control how the widget is displayed
"""
self.initDisplay(**attr)
value = self.getAnswer()
input = self.webwidget.widget(self.field, value, **self.attr)
return self.layout(self.question.name, input, **attr)
# -------------------------------------------------------------------------
def fullName(self):
if "parentCode" in self.question:
db = current.db
query = db(self.qtable.code == self.question.parentCode)
record = query.select(self.qtable.id,
self.qtable.name,
limitby=(0, 1)).first()
if record != None:
parentWidget = survey_question_type["Grid"](record.id)
subHeading = parentWidget.getHeading(self.question.parentNumber)
return "%s (%s)" % (self.question.name,
subHeading)
return self.question.name
# -------------------------------------------------------------------------
def layout(self, label, widget, **attr):
"""
This lays out the label and widget that are passed in on the screen.
Currently it has a single default layout mechanism, but in the
future it will be possible to add more, which will be controlled
via the attr passed into display and stored in self.attr
"""
if "display" in attr:
display = attr["display"]
else:
display = "Default"
if display == "Default":
elements = []
elements.append(TR(TH(label), TD(widget),
_class="survey_question"))
return TAG[""](elements)
elif display == "Control Only":
return TD(widget)
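# Usage sketch (the question id is hypothetical): widget.display(question_id=qid)
# renders the default TR(TH(label), TD(input)) row, while
# widget.display(question_id=qid, display="Control Only") returns just the TD cell.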
# -------------------------------------------------------------------------
def onaccept(self, value):
"""
Method to format the value that has just been stored in the database
"""
return value
# -------------------------------------------------------------------------
def type_represent(self):
"""
Display the type in a DIV for displaying on the screen
"""
return DIV(self.typeDescription, _class="surveyWidgetType")
# -------------------------------------------------------------------------
def db_type(self):
"""
Return the real database table type for this question
This assumes that the value is valid
"""
return "string"
# -------------------------------------------------------------------------
def _Tquestion(self, langDict):
"""
Function to translate the question using the dictionary passed in
"""
return survey_T(self.question["name"], langDict)
# -------------------------------------------------------------------------
def getLabelSize(self, maxWidth = 20):
"""
function to return the size of the label, in terms of merged
MatrixElements
"""
labelSize = (0,0)
if self.label:
labelWidth = maxWidth/2
if not self.labelLeft:
labelWidth = self.xlsWidgetSize[0] + 1
_TQstn = self._Tquestion(self.langDict)
labelSize = (labelWidth, len(_TQstn)/(4 * labelWidth / 3) + 1)
return labelSize
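# Worked example (hypothetical figures): with maxWidth=20 and a left-hand label,
# labelWidth = 20/2 = 10; a 30-character translated question then gives a height of
# 30 / (4*10/3) + 1 = 30/13 + 1 = 3 rows (Python 2 integer division),
# so labelSize == (10, 3).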
# -------------------------------------------------------------------------
def getWidgetSize(self, maxWidth = 20):
"""
function to return the size of the input control, in terms of merged
MatrixElements
"""
return (self.xlsWidgetSize[0] + 1, self.xlsWidgetSize[1] + 1)
# -------------------------------------------------------------------------
def getMatrixSize(self):
"""
function to return the size of the widget
"""
labelSize = self.getLabelSize()
widgetSize = self.getWidgetSize()
if self.labelLeft:
return (max(labelSize[1],widgetSize[1]) + self.xlsMargin[1],
labelSize[0] + widgetSize[0] + self.xlsMargin[0])
else:
return (labelSize[1] + widgetSize[1] + self.xlsMargin[1],
max(labelSize[0],widgetSize[0]) + self.xlsMargin[0])
# -------------------------------------------------------------------------
def canGrowHorizontal(self):
return False
# -------------------------------------------------------------------------
def canGrowVertical(self):
return False
# -------------------------------------------------------------------------
def growHorizontal(self, amount):
if self.canGrowHorizontal():
self.xlsWidgetSize[0] += amount
# -------------------------------------------------------------------------
def growVertical(self, amount):
if self.canGrowHorizontal():
self.xlsWidgetSize[1] += amount
# -------------------------------------------------------------------------
def addToHorizontalMargin(self, amount):
self.xlsMargin[0] += amount
# -------------------------------------------------------------------------
def addToVerticalMargin(self, amount):
self.xlsMargin[1] += amount
# -------------------------------------------------------------------------
def addPaddingAroundWidget(self, matrix, startrow, startcol, lWidth, lHeight, wWidth, wHeight):
if self.labelLeft:
# Add padding below the input boxes
if lHeight > wHeight:
cellPadding = MatrixElement(startrow + wHeight,startcol + lWidth,"", style="styleText")
cellPadding.merge(wWidth-1,lHeight - wHeight - 1)
matrix.addElement(cellPadding)
# Add padding below the label
if lHeight < wHeight:
cellPadding = MatrixElement(startrow + lHeight,startcol,"", style="styleText")
cellPadding.merge(lWidth-1,wHeight - lHeight - 1)
matrix.addElement(cellPadding)
height = wHeight + 1
else:
# Add padding to make the widget the same width as the label
if lWidth > wWidth:
cellPadding = MatrixElement(startrow+lHeight,startcol+wWidth,"", style="styleText")
cellPadding.merge(lWidth - wWidth - 1, lHeight-1)
matrix.addElement(cellPadding)
# Add padding to make the label the same width as the widget
if lWidth < wWidth:
cellPadding = MatrixElement(startrow,startcol + lWidth,"", style="styleText")
cellPadding.merge(wWidth - lWidth - 1, wHeight-1)
matrix.addElement(cellPadding)
# -------------------------------------------------------------------------
def addPaddingToCell(self,
matrix,
startrow,
startcol,
endrow,
endcol,
):
# Add widget padding
if self.xlsMargin[0] > 0:
cellPadding = MatrixElement(startrow,endcol,"", style="styleText")
cellPadding.merge(self.xlsMargin[0]-1,endrow - startrow -1)
matrix.addElement(cellPadding)
if self.xlsMargin[1] > 0:
cellPadding = MatrixElement(endrow,startcol,"", style="styleText")
cellPadding.merge(endcol-startcol+self.xlsMargin[0]-1,self.xlsMargin[1]-1)
matrix.addElement(cellPadding)
# -------------------------------------------------------------------------
def writeToMatrix(self,
matrix,
row,
col,
langDict=dict(),
answerMatrix=None
):
"""
Function to write out basic details to the matrix object
"""
self._store_metadata()
startrow = row
startcol = col
mergeLH = 0
mergeLV = 0
height = 0
width = 0
if self.label:
_TQstn = self._Tquestion(langDict)
cell = MatrixElement(row,
col,
_TQstn,
style="styleSubHeader")
(width, height) = self.getLabelSize()
mergeLH = width - 1
mergeLV = height - 1
cell.merge(mergeLH,mergeLV)
matrix.addElement(cell)
if self.labelLeft:
col += 1 + mergeLH
else:
row += 1 + mergeLV
cell = MatrixElement(row,col,"", style="styleInput")
mergeWH = self.xlsWidgetSize[0]
mergeWV = self.xlsWidgetSize[1]
cell.merge(mergeWH,mergeWV)
matrix.addElement(cell)
if self.labelLeft:
height = max(height, mergeWV + 1)
width += mergeWH + 1
else:
height += mergeWV + 1
width = max(width, mergeWH + 1)
self.addPaddingAroundWidget(matrix, startrow, startcol, mergeLH+1, mergeLV+1, mergeWH+1, mergeWV+1)
# Add widget padding
self.addPaddingToCell(matrix, startrow, startcol, startrow + height, startcol + width)
height += self.xlsMargin[1]
width += self.xlsMargin[0]
# Add details to the answerMatrix (if required)
if answerMatrix != None:
answerRow = answerMatrix.lastRow+1
cell = MatrixElement(answerRow, 0, self.question["code"],
style="styleSubHeader")
answerMatrix.addElement(cell)
cell = MatrixElement(answerRow, 3,
self.rowcol_to_cell(row, col),
style="styleText")
answerMatrix.addElement(cell)
endcol = startcol+width
endrow = startrow+height
if DEBUG:
# Only for debugging purposes
self.verifyCoords(endrow, endcol)
return (endrow, endcol)
#if self.labelLeft:
# return (row+self.xlsMargin[1]+height, col+self.xlsMargin[0]+mergeWH)
#else:
# return (row+self.xlsMargin[1]+mergeLV+mergeWV, col+self.xlsMargin[0]+max(mergeLH,mergeWH))
# -------------------------------------------------------------------------
def writeToRTF(self, ss, langDict):
"""
Function to write the basic question details to a rtf document.
The basic details will be written to Cell objects that can be
added to a row in a table object.
"""
from PyRTF import Paragraph, Cell, B, BorderPS, FramePS
thin_edge = BorderPS( width=20, style=BorderPS.SINGLE )
thin_frame = FramePS( thin_edge, thin_edge, thin_edge, thin_edge )
line = []
p = Paragraph(ss.ParagraphStyles.Normal)
p.append(B(str(self.fullName())))
line.append(Cell(p, thin_frame))
p = Paragraph(ss.ParagraphStyles.NormalGrey)
p.append()
line.append(Cell(p, thin_frame))
return line
# -------------------------------------------------------------------------
def verifyCoords(self, endrow, endcol):
(width, height) = self.getMatrixSize()
calcrow = self.startPosn[1] + width
calccol = self.startPosn[0] + height
error = False
if calcrow != endrow:
error = True
if calccol != endcol:
error = True
if error:
w_code = self.question["code"]
msg = "Coord Verification Error for widget %s, startPosn:(%s, %s), expected:(%s, %s), observed:(%s, %s)" % (w_code, self.startPosn[1], self.startPosn[0], endrow, endcol, calcrow, calccol)
current.log.error(msg)
######################################################################
# Functions not fully implemented or used
######################################################################
def validate(self, valueList, qstn_id):
"""
This will validate the data passed in to the widget
NOTE: Not currently used but will be used when the UI supports the
validation of data entered in to the web form
"""
if len(valueList) == 0:
return self.ANSWER_MISSING
data = value(valueList, 0)
if data == None:
return self.ANSWER_MISSING
length = self.get("Length")
if length is not None and len(data) > length:
return self.ANSWER_PARTLY_VALID
| |
#!/usr/bin/env python
__author___= "<NAME>"
__copyright__= "Copyright 2017"
__license__= "Apache License, Version 2.0"
__email__= "<EMAIL>"
'''
This program parses .jimple files (Soot output) from each provided Java project (app)
and extracts API methods and exceptions that developers use (to handle called API methods).
Then, it gets the API methods and exceptions that are documented in the corresponding
version of the API reference (depending on what API version each app uses).
Finally, it applies set operations on both datasets (app and API)
to find documented/undocumented exceptions.
Input:
Folder with jimple files (Soot output) from each .jar file (app);
the folder with the .jimple files has the same name as the .jar file,
the soot_log.txt file keeps records regarding the processing when Soot is running.
Output:
exceptions that are documented/undocumented (XX-doc-excps.txt, XX-undoc-excps.txt)
in the used API version; where XX is the name of the given Java API.
Instructions to independently run the script:
- before you run the script, cd to ../eRec/apps/jar/ (path for the folder with the output .jimple files)
- in the following program, the path_jar is for the folder with the apps --- .jar files
- in the following program, set ../eRec/api/JSON/ (path for the folder with the JSON files --- different API versions)
Note 1: soot_log.txt doesn't keep the API version that the app uses (as in the case of Android).
For this, we need to set the API version ourselves in the java_version field below.
Note 2: this script can be used for the Java API and for Java libraries (e.g. apache-commons-lang).
We take into consideration only the .java files typically found in src/(main)/java package of the libraries.
E.g. see the structure of the packages of the source code of the apache-commons-lang3-3.6 for instance:
(https://github.com/apache/commons-lang/tree/master/src/main/java/org/apache/commons/lang3)
'''
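# Example invocation (the script name, paths and version label are hypothetical;
# the argument order matches the sys.argv reads below):
#   python parse_jimple_exceptions.py ../eRec/apps/jar/ ../eRec/api/JSON/ 7 java
# i.e. <path_jar> <path_JSON> <java_version> <analysis_type>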
# for regex
import re
# for os walking
import os
# for json decoding
import json
import sys
# for dictionaries
from collections import defaultdict
from collections import OrderedDict
# for set operations
from sets import Set
# dictionary of API methods and exceptions in a single .jar
global app_dict
app_dict = OrderedDict([])
# patterns for finding method signatures
p1 = "\s[a-zA-Z]+[a-zA-Z\$0-9]*\([a-zA-Z\$\_0-9\[\]\.\,\s]*\)[\s]*(throws)*.*$"
p2 = "[\s]*\{[\s]*$"
# pattern for constructors
p3 = "\<[c]*[l]*init\>\([a-zA-Z\$\_0-9\[\]\.\,\s]*\)"
# patterns for thrown exceptions
p4 = "catch\s"
p6 = "(specialinvoke|staticinvoke|virtualinvoke)\s"
p7 = "throw\s"
p8 = "[a-zA-Z]+[a-zA-Z\$0-9]*\([a-zA-Z\$\_0-9\[\]\.\,\s]*\)"
p9 = "\$r[0-9]+"
p10 = "[a-z]+[a-zA-Z0-9\$]*\(.*\)"
# pattern to locate API method in jimple files from Android apks or jars
# (e.g. $r5 = virtualinvoke $r3.<android.view.View: android.view.View findViewById(int)>($i0);)
#p11 = "(specialinvoke|staticinvoke|virtualinvoke).*[\<](java)\..*\s[a-z]+[a-zA-Z\$0-9]*\([a-zA-Z\$\_0-9\[\]\.\,\s]*\)"
# pattern for catch clause
p12 = "catch.*[\s]+from[\s]+label[0-9]+[\s]+to[\s]+label[0-9]+[\s]+with[\s]+label[0-9]+"
# pattern for new label
p13 = "^[\s]*label[0-9]+\:[\s]*$"
# patterns for cases in a method
p14 = "lookupswitch\(\$i[0-9]+\)"
# folder for the jimple files from the .jar files
global path_jar
path_jar = sys.argv[1] # "../eRec/apps/jar/"
# folder for the JSON files (different versions of the API)
global path_JSON
path_JSON = sys.argv[2] # "../eRec/api/JSON/"
global java_version
java_version = sys.argv[3] # e.g. java 7, java 8, apache-commons-lang-3.6 etc.
global analysis_type
analysis_type = sys.argv[4] # java, apache-commons-lang, etc.
# Open a new file for doc exceptions
global fi
fi = open(analysis_type+"-doc-excps.txt", "wb")
# Open a new file for undoc exceptions
global fo
fo = open(analysis_type+"-undoc-excps.txt", "wb")
global p11
p11 = "(specialinvoke|staticinvoke|virtualinvoke).*[\<]("+analysis_type+")\..*\s[a-z]+[a-zA-Z\$0-9]*\([a-zA-Z\$\_0-9\[\]\.\,\s]*\)"
def main():
# open and parse apk files
read_folder(path_jar, java_version)
# Close files
fi.close()
fo.close()
def read_folder(path, java_version):
app_name = ""
# list app folders (first level subfolders)
dir_list = os.walk(path).next()[1]
for k, l in enumerate(dir_list):
app_name = dir_list[k]
if ((re.search("^derbyTesting$", app_name)) or (re.search("^asm-3.1$", app_name)) or (re.search("^antlr-3.1.3$", app_name))):
print "Possible issues with these .jar files ..."
continue
else:
print app_name
app_path = path + "/" + dir_list[k]
# search in the files of the app folder
files = os.listdir(app_path)
if ((len(files) > 2) and (java_version != "")):
# update doc_dictionary for the currently used API version
doc_dict = get_right_API_version(java_version)
read_files(files, app_path, java_version, doc_dict, app_name)
def read_files(files, app_path, api_ver, doc_dict, app_name):
for f in files:
# check for libraries included in Soot
e = (not re.search("^(java)\..*", f)) and re.search("\.jimple$", f)
if e:
j_file = app_path + "/" + f
parse_jimple(j_file, doc_dict, app_name, api_ver)
d = re.search("^(java)\..*", f) and re.search("\.jimple$", f)
if d:
l = app_path + "/" + f
# get the version of the API platform that the examined app uses;
# you have to set the Java version when running the script
def get_right_API_version(java_version):
api_dict = OrderedDict([])
api_dict = read_right_javadoc_version(java_version)
return api_dict
# find and read the right JSON file for the corresponding API version
def read_right_javadoc_version(api_version):
if api_version is not None:
is_api_version = False
json_file = analysis_type + "-" + api_version + ".json" # for 3rd-party Java libs find the right .json file, i.e.: json_file = "commons-io-2.5.json"
for subdir, dirs, files in os.walk(path_JSON):
for name in files:
if re.search(json_file, name):
is_api_version = True
return decode_json_doc(path_JSON + "/" + json_file)
if (is_api_version == False):
print 'API version not found: ', api_version
raise IOError
# decode json files into dictionary
def decode_json_doc(f_json):
print "json file ", f_json
try:
with open(f_json) as f:
return json.load(f)
except ValueError:
print 'Decoding JSON failed: ', f_json
# parse current jimple file
def parse_jimple(file, doc_dict, app_name, api_ver):
# flag for a new method
is_method = 0
# class where the method belongs to
cl_m = ""
# total method with class
t_method = ""
# keep the current method (including the class)
initial_method = ""
# method's dictionary (label: lines, API methods, exceptions)
m_dict = OrderedDict([])
# method's subdictionary (one dict per label -see above)
attributes = {}
# for a new label block
is_new_level = 0
# new label's name
new_label = ""
# flag for a new label
is_label = 0
# keep the file (class, not embedded) that the current method belongs to
file_class = re.search(".*\.jimple$", file).group()
fl_class = re.sub(".jimple", "", file_class)
if (re.search("\$", fl_class)):
cl_m = re.sub("\$.*.$", "", fl_class)
else:
cl_m = fl_class
f = open(file)
lines = f.readlines()
for l, k in enumerate(lines):
# in case of new method signature
if ((l + 1 < len(lines)) and not (re.search(p14, lines[l])) and (re.search(p1, lines[l]) and re.search(p2, lines[l + 1])) and (is_new_level == 0) and (is_method == 0)):
method_sig = re.search(p1, lines[l]).group()
method_s = re.search(p8, method_sig).group()
method_nm = re.split("\(", method_s)
upd_method = update_md_args(method_s)
t_method = cl_m + "." + upd_method
# keep only method and cut the previous path
cl_m_l = re.split("/", cl_m)
# keep class, method and signature
tmthd = cl_m_l[len(cl_m_l) - 1] + "." + upd_method
initial_method = t_method
# change flag to indicate the existence of a new method signature
is_method = 1
# update dictionary of the application -> add new dictionary for a new method
app_dict.setdefault(initial_method, m_dict)
# new dictionary for the current method -> initialize dict
m_dict = OrderedDict([])
# change flag for new label to 0
is_new_level = 0
# not known label name yet
new_label = ""
# get a subset of the lines in the current jimple file
lines_l = lines[l:]
# in case there is no label in the current method
if (is_exist_label(lines_l) == False):
# change flag to show that we are in a new level - key -block
is_new_level = 1
# define that we haven't got a new label
new_label = "withoutLabel"
# add the new label as key in the current method dictionary
# subdictionary for m_dict
attributes = {}
#attributes.setdefault("lines", [])
attributes.setdefault("api_methods", [])
attributes.setdefault("exceptions", [])
m_dict.setdefault(new_label, attributes)
# in case of a new label, add new label -key in method dictionary
if ((l + 1 < len(lines)) and (re.search(p13, lines[l])) and (is_method == 1)):
# in case of a new label, add new label - key in method dictionary
label_ptrn = re.search(p13, lines[l])
# change flag to show that we are in a new label - key -block
is_new_level = 1
# define the new label's name
new_label_1 = label_ptrn.group()
new_label_2 = re.sub("\:\n", "", new_label_1)
new_label_3 = re.sub("[\s]+", " ", new_label_2)
new_label_lst = re.split(" ", new_label_3)
new_label = new_label_lst[1]
# add the new label as key in the current method dictionary, initializing label's attributes first
attributes = {}
#attributes.setdefault("lines", [])
attributes.setdefault("api_methods", [])
attributes.setdefault("exceptions", [])
m_dict.setdefault(new_label, attributes)
# in case of new line in the current level
if ((l + 1 < len(lines)) and (is_new_level == 1) and (is_method == 1)):
# add values (lines) to the current label -key
#attributes.setdefault("lines", []).append(lines[l])
# check if in the current line there exists an API method
if (re.search(p11, lines[l])):
api_m = isolate_api_method(lines[l])
attributes.setdefault("api_methods", []).append(api_m)
# check if in the next line exists a new label
if (re.search(p13, lines[l + 1])):
is_new_level = 0
# in case of new line and throw in the current level
if ((l + 2 < len(lines)) and (is_new_level == 1) and (is_method == 1)):
# exception patterns according to a jimple file (see a jimple file for example)
thr_exc = re.search(p6, lines[l]) and (re.search(p7, lines[l + 1]) or re.search(p7, lines[l + 2])) and (t_method == initial_method) and (t_method != "")
if (thr_exc):
pat1 = ""
pat2 = ""
pat3 = ""
# check in which lines are the right specialinvoke and throw
if (re.search(p9, lines[l])):
pat1 = re.search(p9, lines[l]).group()
if(re.search(p9, lines[l + 1])):
pat2 = re.search(p9, lines[l + 1]).group()
if(re.search(p9, lines[l + 2])):
pat3 = re.search(p9, lines[l + 2]).group()
if (pat1 == pat2) or (pat1 == pat3):
t_exc = re.split("\<", lines[l])
exc = re.split("\:" , t_exc[1])
e_nm = keep_exc_name(exc[0])
# exclude Throwable | |
value)
def write_stderr(self, value):
"""writes a string to standard input in the remote console"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._STDE)
write_string(self.conn, value)
################################################################
# Implementation of execution, etc...
def execution_loop(self):
"""starts processing execution requests"""
raise NotImplementedError
def run_command(self, command):
"""runs the specified command which is a string containing code"""
raise NotImplementedError
def execute_file(self, filename, args):
"""executes the given filename as the main module"""
return self.execute_file_ex('script', filename, args)
def execute_file_ex(self, filetype, filename, args):
"""executes the given filename as a 'script', 'module' or 'process'."""
raise NotImplementedError
def interrupt_main(self):
"""aborts the current running command"""
raise NotImplementedError
def exit_process(self):
"""exits the REPL process"""
raise NotImplementedError
def get_members(self, expression):
"""returns a tuple of the type name, instance members, and type members"""
raise NotImplementedError
def get_signatures(self, expression):
"""returns doc, args, vargs, varkw, defaults."""
raise NotImplementedError
def set_current_module(self, module):
"""sets the module which code executes against"""
raise NotImplementedError
def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind):
"""sets the current thread and frame which code will execute against"""
raise NotImplementedError
def get_module_names(self):
"""returns a list of module names"""
raise NotImplementedError
def flush(self):
"""flushes the stdout/stderr buffers"""
raise NotImplementedError
def attach_process(self, port, debugger_id, debug_options):
"""starts processing execution requests"""
raise NotImplementedError
def exit_work_item():
sys.exit(0)
if sys.platform == 'cli':
# We need special handling to reset the abort for keyboard interrupt exceptions
class ReplAbortException(Exception): pass
import clr
clr.AddReference('Microsoft.Dynamic')
clr.AddReference('Microsoft.Scripting')
clr.AddReference('IronPython')
from Microsoft.Scripting import KeyboardInterruptException
from Microsoft.Scripting import ParamDictionaryAttribute
from IronPython.Runtime.Operations import PythonOps
from IronPython.Runtime import PythonContext
from Microsoft.Scripting import SourceUnit, SourceCodeKind
from Microsoft.Scripting.Runtime import Scope
python_context = clr.GetCurrentRuntime().GetLanguage(PythonContext)
from System import DBNull, ParamArrayAttribute
builtin_method_descriptor_type = type(list.append)
import System
NamespaceType = type(System)
class _OldClass:
pass
_OldClassType = type(_OldClass)
_OldInstanceType = type(_OldClass())
class BasicReplBackend(ReplBackend):
"""Basic back end which executes all Python code in-proc"""
future_bits = 0x3e010 # code flags used to mark future bits
def __init__(self, mod_name='__main__'):
import threading
ReplBackend.__init__(self)
if mod_name is not None:
if sys.platform == 'cli':
self.exec_mod = Scope()
self.exec_mod.__name__ = '__main__'
else:
sys.modules[mod_name] = self.exec_mod = imp.new_module(mod_name)
else:
self.exec_mod = sys.modules['__main__']
self.code_flags = 0
self.execute_item = None
self.execute_item_lock = threading.Lock()
self.execute_item_lock.acquire() # lock starts acquired (we use it like a manual reset event)
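# Handshake with the execution loop: run_command()/execute_file_ex() store the work
# item in self.execute_item and release this lock; run_one_command() blocks in
# acquire() until then, runs the item, and clears it again.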
def init_connection(self):
sys.stdout = _ReplOutput(self, is_stdout = True)
sys.stderr = _ReplOutput(self, is_stdout = False)
sys.stdin = _ReplInput(self)
if sys.platform == 'cli':
import System
System.Console.SetOut(DotNetOutput(self, True))
System.Console.SetError(DotNetOutput(self, False))
def connect(self, port):
ReplBackend.connect(self, port)
self.init_connection()
def connect_using_socket(self, socket):
ReplBackend.connect_using_socket(self, socket)
self.init_connection()
def run_file_as_main(self, filename, args):
f = open(filename, 'rb')
try:
contents = f.read().replace(to_bytes('\r\n'), to_bytes('\n'))
finally:
f.close()
sys.argv = [filename]
sys.argv.extend(_command_line_to_args_list(args))
self.exec_mod.__file__ = filename
if sys.platform == 'cli':
code = python_context.CreateSnippet(contents, None, SourceCodeKind.File)
code.Execute(self.exec_mod)
else:
self.code_flags = 0
real_file = filename
if isinstance(filename, unicode) and unicode is not str:
# http://pytools.codeplex.com/workitem/696
# We need to encode the unicode filename here, Python 2.x will throw trying
# to convert it to ASCII instead of the filesystem encoding.
real_file = filename.encode(sys.getfilesystemencoding())
code = compile(contents, real_file, 'exec')
self.code_flags |= (code.co_flags & BasicReplBackend.future_bits)
exec(code, self.exec_mod.__dict__, self.exec_mod.__dict__)
def python_executor(self, code):
"""we can't close over unbound variables in execute_code_work_item
due to the exec, so we do it here"""
def func():
code.Execute(self.exec_mod)
return func
def execute_code_work_item(self):
_debug_write('Executing: ' + repr(self.current_code))
stripped_code = self.current_code.strip()
if stripped_code:
if sys.platform == 'cli':
code_to_send = ''
for line in stripped_code.split('\n'):
stripped = line.strip()
if (stripped.startswith('#') or not stripped) and not code_to_send:
continue
code_to_send += line + '\n'
code = python_context.CreateSnippet(code_to_send, None, SourceCodeKind.InteractiveCode)
dispatcher = clr.GetCurrentRuntime().GetLanguage(PythonContext).GetCommandDispatcher()
if dispatcher is not None:
dispatcher(self.python_executor(code))
else:
code.Execute(self.exec_mod)
else:
code = compile(self.current_code, '<stdin>', 'single', self.code_flags)
self.code_flags |= (code.co_flags & BasicReplBackend.future_bits)
exec(code, self.exec_mod.__dict__, self.exec_mod.__dict__)
self.current_code = None
def run_one_command(self, cur_modules, cur_ps1, cur_ps2):
# runs a single iteration of an input, execute file, etc...
# This is extracted into its own method so we play nice w/ IronPython thread abort.
# Otherwise we have a nested exception hanging around and the 2nd abort doesn't
# work (that's probably an IronPython bug)
try:
new_modules = self._get_cur_module_set()
try:
if new_modules != cur_modules:
self.send_modules_changed()
except:
pass
cur_modules = new_modules
self.execute_item_lock.acquire()
cur_cwd = os.getcwd()
if self.check_for_exit_execution_loop():
return True, None, None, None
if self.execute_item is not None:
try:
self.execute_item()
finally:
self.execute_item = None
try:
self.send_command_executed()
except SocketError:
return True, None, None, None
try:
if cur_ps1 != sys.ps1 or cur_ps2 != sys.ps2:
new_ps1 = str(sys.ps1)
new_ps2 = str(sys.ps2)
self.send_prompt(new_ps1, new_ps2, allow_multiple_statements=False)
cur_ps1 = new_ps1
cur_ps2 = new_ps2
except Exception:
pass
try:
if cur_cwd != os.getcwd():
self.send_cwd()
except Exception:
pass
except SystemExit:
self.send_error()
self.send_exit()
# wait for ReplEvaluator to send back exit requested which will indicate
# that all the output has been processed.
while not self.exit_requested:
time.sleep(.25)
return True, None, None, None
except BaseException:
_debug_write('Exception')
exc_type, exc_value, exc_tb = sys.exc_info()
if sys.platform == 'cli':
if isinstance(exc_value.clsException, System.Threading.ThreadAbortException):
try:
System.Threading.Thread.ResetAbort()
except SystemError:
pass
sys.stderr.write('KeyboardInterrupt')
else:
# let IronPython format the exception so users can do -X:ExceptionDetail or -X:ShowClrExceptions
exc_next = self.skip_internal_frames(exc_tb)
sys.stderr.write(''.join(traceback.format_exception(exc_type, exc_value, exc_next)))
else:
exc_next = self.skip_internal_frames(exc_tb)
sys.stderr.write(''.join(traceback.format_exception(exc_type, exc_value, exc_next)))
try:
self.send_error()
except SocketError:
_debug_write('err sending DONE')
return True, None, None, None
return False, cur_modules, cur_ps1, cur_ps2
def skip_internal_frames(self, tb):
"""return the first frame outside of the repl/debugger code"""
while tb is not None and self.is_internal_frame(tb):
tb = tb.tb_next
return tb
def is_internal_frame(self, tb):
"""return true if the frame is from internal code (repl or debugger)"""
f = tb.tb_frame
co = f.f_code
filename = co.co_filename
return filename.endswith('visualstudio_py_repl.py') or filename.endswith('visualstudio_py_debugger.py')
def execution_loop(self):
"""loop on the main thread which is responsible for executing code"""
if sys.platform == 'cli' and sys.version_info[:3] < (2, 7, 1):
# IronPython doesn't support thread.interrupt_main until 2.7.1
import System
self.main_thread = System.Threading.Thread.CurrentThread
# save ourselves so global lookups continue to work (required pre-2.6)...
cur_modules = set()
try:
cur_ps1 = sys.ps1
cur_ps2 = sys.ps2
except:
# CPython/IronPython don't set sys.ps1 for non-interactive sessions, Jython and PyPy do
sys.ps1 = cur_ps1 = '>>> '
sys.ps2 = cur_ps2 = '... '
self.send_prompt(cur_ps1, cur_ps2, allow_multiple_statements=False)
while True:
exit, cur_modules, cur_ps1, cur_ps2 = self.run_one_command(cur_modules, cur_ps1, cur_ps2)
if exit:
return
def check_for_exit_execution_loop(self):
return False
def execute_script_work_item(self):
self.run_file_as_main(self.current_code, self.current_args)
def execute_module_work_item(self):
new_argv = [''] + _command_line_to_args_list(self.current_args)
old_argv = sys.argv
import runpy
try:
sys.argv = new_argv
runpy.run_module(self.current_code, alter_sys=True)
except Exception:
traceback.print_exc()
finally:
sys.argv = old_argv
def execute_process_work_item(self):
try:
from subprocess import Popen, PIPE, STDOUT
import codecs
out_codec = codecs.lookup(sys.stdout.encoding)
proc = Popen(
'"%s" %s' % (self.current_code, self.current_args),
stdout=PIPE,
stderr=STDOUT,
bufsize=0,
)
for line in proc.stdout:
print(out_codec.decode(line, 'replace')[0].rstrip('\r\n'))
except Exception:
traceback.print_exc()
@staticmethod
def _get_cur_module_set():
"""gets the set of modules avoiding exceptions if someone puts something
weird in there"""
try:
return set(sys.modules)
except:
res = set()
for name in sys.modules:
try:
res.add(name)
except:
pass
return res
def run_command(self, command):
self.current_code = command
self.execute_item = self.execute_code_work_item
self.execute_item_lock.release()
def execute_file_ex(self, filetype, filename, args):
self.current_code = filename
self.current_args = args
self.execute_item = getattr(self, 'execute_%s_work_item' % filetype, None)
self.execute_item_lock.release()
def interrupt_main(self):
# acquire the send lock so we don't interrupt while we're communicating w/ the debugger
with self.send_lock:
if sys.platform == 'cli' and sys.version_info[:3] < (2, 7, 1):
# IronPython doesn't get thread.interrupt_main until 2.7.1
self.main_thread.Abort(ReplAbortException())
else:
thread.interrupt_main()
def exit_process(self):
self.execute_item = exit_work_item
try:
self.execute_item_lock.release()
except:
pass
sys.exit(0)
def get_members(self, expression):
"""returns a tuple of the type name, instance members, and type members"""
getattr_func = getattr
if not expression:
all_members = {}
if sys.platform == 'cli':
code = python_context.CreateSnippet('vars()', None, SourceCodeKind.AutoDetect)
items = code.Execute(self.exec_mod)
else:
items = self.exec_mod.__dict__
for key, value in items.items():
all_members[key] = self.get_type_name(value)
return '', all_members, {}
else:
if sys.platform == 'cli':
code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect)
val = code.Execute(self.exec_mod)
code = python_context.CreateSnippet('dir(' + expression + ')', None, SourceCodeKind.AutoDetect)
members = code.Execute(self.exec_mod)
code = python_context.CreateSnippet('lambda value, name: getattr(value, name)', None, SourceCodeKind.AutoDetect)
getattr_func = code.Execute(self.exec_mod)
else:
val = eval(expression, self.exec_mod.__dict__, self.exec_mod.__dict__)
members = dir(val)
return self.collect_members(val, members, getattr_func)
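# Shape of the value get_members() returns (contents illustrative): for an empty
# expression it is ('', {member_name: type_name, ...}, {}); otherwise
# collect_members() below fills the instance-member and type-member dictionaries.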
def collect_members(self, val, members, getattr_func):
t = type(val)
inst_members = {}
if hasattr(val, '__dict__'):
# collect the instance members
try:
for mem_name in val.__dict__:
mem_t = self._get_member_type(val, mem_name, True, getattr_func)
if mem_t is not None:
inst_members[mem_name] = mem_t
except:
pass
# collect the type members
type_members = {}
for mem_name in members:
if mem_name not in inst_members:
import sqlite3
import tkinter as tk
from logik import Logik
class ModelHotel:
def __init__(self,tab,text,Button):
self.tab = tab
self.text = text
self.button = Button
self.counter1 = 1
self.counter2 = 2
self.counter3 = 3
self.counter4 = 4
self.counter5 = 5
self.counter6 = 6
self.counter7 = 7
self.counter8 = 8
self.counter9 = 9
self.counter10 = 10
self.text.destroy()
self.button.destroy()
self.root = tk.Tk()
self.root.geometry('500x500')
self.root.title('Model')
self.data = 1
if self.data < 10:
self.myData = f"0{self.data}.02.2021"
else:
self.myData = f"{self.data}.02.2021"
self.roomTXT1 = 'Номер - 1'
self.roomTXT2 = 'Номер - 2'
self.roomTXT3 = 'Номер - 3'
self.roomTXT4 = 'Номер - 4'
self.roomTXT5 = 'Номер - 5'
self.roomTXT6 = 'Номер - 6'
self.roomTXT7 = 'Номер - 7'
self.roomTXT8 = 'Номер - 8'
self.roomTXT9 = 'Номер - 9'
self.roomTXT10 = 'Номер - 10'
self.conn = sqlite3.connect('testDB.db')
self.cursor = self.conn.cursor()
self.myData1 = tk.Label(
self.root,
text = self.myData,
font=(
None,
20
)
)
self.myData1.place(
x = 170,
y = 0
)
self.roomTXT1 = tk.Label(
self.root,
text = self.roomTXT1,
font=(
None,
15
)
)
self.roomTXT1.place(
x = 0,
y = 40
)
self.roomTXT2 = tk.Label(
self.root,
text=self.roomTXT2,
font=(
None,
15
)
)
self.roomTXT2.place(
x=0,
y=80
)
self.roomTXT3 = tk.Label(
self.root,
text=self.roomTXT3,
font=(
None,
15
)
)
self.roomTXT3.place(
x=0,
y=120
)
self.roomTXT4 = tk.Label(
self.root,
text=self.roomTXT4,
font=(
None,
15
)
)
self.roomTXT4.place(
x=0,
y=160
)
self.roomTXT5 = tk.Label(
self.root,
text=self.roomTXT5,
font=(
None,
15
)
)
self.roomTXT5.place(
x=0,
y=200
)
self.roomTXT6 = tk.Label(
self.root,
text=self.roomTXT6,
font=(
None,
15
)
)
self.roomTXT6.place(
x=0,
y=240
)
self.roomTXT7 = tk.Label(
self.root,
text=self.roomTXT7,
font=(
None,
15
)
)
self.roomTXT7.place(
x=0,
y=280
)
self.roomTXT8 = tk.Label(
self.root,
text=self.roomTXT8,
font=(
None,
15
)
)
self.roomTXT8.place(
x=0,
y=320
)
self.roomTXT9 = tk.Label(
self.root,
text=self.roomTXT9,
font=(
None,
15
)
)
self.roomTXT9.place(
x=0,
y=360
)
self.roomTXT10 = tk.Label(
self.root,
text=self.roomTXT10,
font=(
None,
15
)
)
self.roomTXT10.place(
x=0,
y=400
)
# ENTRY
self.room1Entry = tk.Entry(
self.root,
width = 25
)
self.room1Entry.place(
x = 200,
y = 40 + 5
)
self.room2Entry = tk.Entry(
self.root,
width=25
)
self.room2Entry.place(
x=200,
y=80 + 5
)
self.room3Entry = tk.Entry(
self.root,
width=25
)
self.room3Entry.place(
x=200,
y=120 + 5
)
self.room4Entry = tk.Entry(
self.root,
width=25
)
self.room4Entry.place(
x=200,
y=160 + 5
)
self.room5Entry = tk.Entry(
self.root,
width=25
)
self.room5Entry.place(
x=200,
y=200 + 5
)
self.room6Entry = tk.Entry(
self.root,
width=25
)
self.room6Entry.place(
x=200,
y=240 + 5
)
self.room7Entry = tk.Entry(
self.root,
width=25
)
self.room7Entry.place(
x=200,
y=280 + 5
)
self.room8Entry = tk.Entry(
self.root,
width=25
)
self.room8Entry.place(
x=200,
y=320 + 5
)
self.room9Entry = tk.Entry(
self.root,
width=25
)
self.room9Entry.place(
x=200,
y=360 + 5
)
self.room10Entry = tk.Entry(
self.root,
width=25
)
self.room10Entry.place(
x=200,
y=400 + 5
)
######
## Button
######
GeneralButton = tk.Button(
self.root,
width = 20,
text = 'Дальше',
background = "#DCDCDC",
command = lambda: self.update()
)
GeneralButton.place(
x = 200,
y = 450
)
def update(self):
self.data +=1
first = self.room1Entry.get()
second = self.room2Entry.get()
third = self.room3Entry.get()
fourth = self.room4Entry.get()
fifth = self.room5Entry.get()
sixth = self.room6Entry.get()
seventh = self.room7Entry.get()
eight = self.room8Entry.get()
ninth = self.room9Entry.get()
tenth = self.room10Entry.get()
self.room1Entry.delete(
0,
last=tk.END
)
self.room2Entry.delete(
0,
last=tk.END
)
self.room3Entry.delete(
0,
last=tk.END
)
self.room4Entry.delete(
0,
last=tk.END
)
self.room5Entry.delete(
0,
last=tk.END
)
self.room6Entry.delete(
0,
last=tk.END
)
self.room7Entry.delete(
0,
last=tk.END
)
self.room8Entry.delete(
0,
last=tk.END
)
self.room9Entry.delete(
0,
last=tk.END
)
self.room10Entry.delete(
0,
last=tk.END
)
########################################################UPDATE##################################################
self.zero_prise = 0
try:
first = int(first)
if first == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(self.counter1) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if first > 1850 and first < 7400:
sql = "UPDATE model SET prise = '" + str(first) + "'," " free= 'no' WHERE id = '" + str(self.counter1) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter1) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter1 +=10
except:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter1) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter1 += 10
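# Note: the UPDATE statements in this method build SQL by string concatenation.
# A parameterized form (illustrative sketch only, assuming the same `model`
# table and column names) avoids quoting and injection problems:
#
#     self.cursor.execute(
#         "UPDATE model SET prise = ?, free = ? WHERE id = ?",
#         (str(first), 'no', self.counter1),
#     )
#     self.conn.commit()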
try:
second = int(second)
if second == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(
self.counter2) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if second > 1850 and second < 7400:
sql = "UPDATE model SET prise = '" + str(second) + "'," " free= 'no' WHERE id = '" + str(
self.counter2) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter2) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter2 += 10
except:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter2) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter2 += 10
try:
third = int(third)
if third == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(self.counter3) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if third > 1850 and third < 7400:
sql = "UPDATE model SET prise = '" + str(third) + "'," " free= 'no' WHERE id = '" + str(self.counter3) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter3) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter3 += 10
except:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter3) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter3 += 10
try:
fourth = int(fourth)
if fourth == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(self.counter4) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if fourth > 1850 and fourth < 7400:
sql = "UPDATE model SET prise = '" + str(fourth) + "'," " free= 'no' WHERE id = '" + str(self.counter4) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter4) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter4 += 10
except:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter4) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter4 += 10
try:
fifth = int(fifth)
if fifth == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(self.counter5) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if fifth > 1850 and fifth < 7400:
sql = "UPDATE model SET prise = '" + str(fifth) + "'," " free= 'no' WHERE id = '" + str(self.counter5) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter5) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter5 += 10
except:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter5) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter5 += 10
try:
sixth = int(sixth)
if sixth == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(self.counter6) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if sixth > 1850 and sixth < 7400:
sql = "UPDATE model SET prise = '" + str(sixth) + "'," " free= 'no' WHERE id = '" + str(self.counter6) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter6) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter6 += 10
except:
sql = "UPDATE model SET prise = '3700', free= 'no' WHERE id = '" + str(self.counter6) + "'"
self.cursor.execute(sql)
self.conn.commit()
self.counter6 += 10
try:
seventh = int(seventh)
if seventh == 0:
sql = "UPDATE model SET prise = '" + str(self.zero_prise) + "'," " free= 'yes' WHERE id = '" + str(self.counter7) + "' "
self.cursor.execute(sql)
self.conn.commit()
else:
if seventh > 1850 and seventh < 7400:
sql = "UPDATE model | |
# emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def _connection_down(self, server, blacklist=False):
'''A connection to a server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
if blacklist:
self.blacklisted_servers.add(server)
# rt12 --- there might be a better place for this.
self.config.set_key("server_blacklist", list(self.blacklisted_servers), True)
else:
self.disconnected_servers.add(server)
if server == self.default_server:
self._set_status('disconnected')
if server in self.interfaces:
self._close_interface(self.interfaces[server])
self._notify('interfaces')
for b in Blockchain.blockchains:
if b.catch_up == server:
b.catch_up = None
def _new_interface(self, server_key, socket):
self._add_recent_server(server_key)
interface = Interface(server_key, socket)
interface.requested_chunks = set()
interface.blockchain = None
interface.tip_raw = None
interface.tip = 0
interface.set_mode(Interface.MODE_VERIFICATION)
with self.interface_lock:
self.interfaces[server_key] = interface
# server.version should be the first message
params = [PACKAGE_VERSION, PROTOCOL_VERSION]
self._queue_request('server.version', params, interface)
if not self._request_checkpoint_headers(interface):
self._subscribe_headers([interface])
if server_key == self.default_server:
self.switch_to_interface(server_key, self.SWITCH_DEFAULT)
def _subscribe_headers(self, interfaces):
# The interface will immediately respond with its last known header.
for interface in interfaces:
interface.logger.debug('subscribing to headers')
self._queue_request('blockchain.headers.subscribe', [], interface)
def _maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self._new_interface(server, socket)
else:
self._connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
with self.interface_lock:
interfaces = list(self.interfaces.values())
for interface in interfaces:
if interface.has_timed_out():
self._connection_down(interface.server)
elif interface.ping_required():
self._queue_request('server.ping', [], interface)
now = time.time()
# nodes
with self.interface_lock:
server_count = len(self.interfaces) + len(self.connecting)
if server_count < self.num_server:
self._start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
logger.debug('retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
with self.interface_lock:
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self._switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server, self.SWITCH_SOCKET_LOOP)
def _request_headers(self, interface, base_height, count):
assert count <=2016
cp_height = app_state.headers.checkpoint.height
params = (base_height, count, cp_height if base_height + count < cp_height else 0)
# The verifier spams us...
if params not in interface.requested_chunks:
interface.requested_chunks.add(params)
interface.logger.info(f'requesting {count:,d} headers from height {base_height:,d}')
self._queue_request('blockchain.block.headers', params, interface)
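# The request params are (start_height, count, cp_height): cp_height is passed
# only when the whole chunk lies below the checkpoint, which asks the server to
# return a merkle proof ('root'/'branch') that _on_block_headers validates below.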
def _on_block_headers(self, interface, request, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
params = response.get('params')
if not request or result is None or params is None or error is not None:
interface.logger.error(error or 'bad response')
return
request_params = request[1]
request_base_height, expected_header_count, cp_height = request_params
# Ignore unsolicited chunks (how can this even happen with request provided?)
try:
interface.requested_chunks.remove(request_params)
except KeyError:
interface.logger.error("unsolicited chunk base_height=%s count=%s",
request_base_height, expected_header_count)
return
hexdata = result['hex']
header_hexsize = 80 * 2
raw_chunk = bfh(hexdata)
actual_header_count = len(raw_chunk) // 80
# We accept fewer headers than we asked for, to cover the case where the distance
# to the tip was unknown.
if actual_header_count > expected_header_count:
interface.logger.error("chunk data size incorrect expected_size=%s actual_size=%s",
expected_header_count * 80, len(raw_chunk))
return
proof_was_provided = False
if 'root' in result and 'branch' in result:
header_height = request_base_height + actual_header_count - 1
header_offset = (actual_header_count - 1) * header_hexsize
header = hexdata[header_offset : header_offset + header_hexsize]
if not self._validate_checkpoint_result(interface, result["root"],
result["branch"], header, header_height):
# Got checkpoint validation data, server failed to provide proof.
interface.logger.error("blacklisting server for incorrect checkpoint proof")
self._connection_down(interface.server, blacklist=True)
return
proof_was_provided = True
elif len(request_params) == 3 and request_params[2] != 0:
# Expected checkpoint validation data, did not receive it.
self._connection_down(interface.server)
return
were_needed = Blockchain.needs_checkpoint_headers
try:
interface.blockchain = Blockchain.connect_chunk(request_base_height, raw_chunk,
proof_was_provided)
except (IncorrectBits, InsufficientPoW, MissingHeader) as e:
interface.logger.error(f'blacklisting server for failed connect_chunk: {e}')
self._connection_down(interface.server, blacklist=True)
return
interface.logger.debug("connected chunk, height=%s count=%s",
request_base_height, actual_header_count)
# If we connected the checkpoint headers all interfaces can subscribe to headers
if were_needed and not self._request_checkpoint_headers(interface):
with self.interface_lock:
self._subscribe_headers(self.interfaces.values())
if not interface.requested_chunks:
if interface.blockchain.height() < interface.tip:
self._request_headers(interface, interface.blockchain.height(), 1000)
else:
interface.set_mode(Interface.MODE_DEFAULT)
interface.logger.debug('catch up done %s', interface.blockchain.height())
interface.blockchain.catch_up = None
self._notify('updated')
def _request_header(self, interface, height):
'''
This works for all modes except for 'default'.
If it is to be used for piecemeal filling of the sparse blockchain
headers file before the checkpoint height, it needs extra
handling for the 'default' mode.
A server interface does not get associated with a blockchain
until it gets handled in the response to its first header
request.
'''
interface.logger.debug("requesting header %d", height)
if height > Net.VERIFICATION_BLOCK_HEIGHT:
params = [height]
else:
params = [height, Net.VERIFICATION_BLOCK_HEIGHT]
self._queue_request('blockchain.block.header', params, interface)
return True
def _on_header(self, interface, request, response):
'''Handle receiving a single block header'''
result = response.get('result')
if not result:
interface.logger.error(response)
self._connection_down(interface.server)
return
if not request:
interface.logger.error("blacklisting server for sending unsolicited header, "
"no request, params=%s", response['params'])
self._connection_down(interface.server, blacklist=True)
return
request_params = request[1]
height = request_params[0]
response_height = response['params'][0]
# This check can be removed if request/response params are reconciled in some sort
# of rewrite.
if height != response_height:
interface.logger.error("unsolicited header request=%s request_height=%s "
"response_height=%s", request_params, height, response_height)
self._connection_down(interface.server)
return
# FIXME: we need to assert we get a proof if we need / requested one
proof_was_provided = False
hexheader = None
if 'root' in result and 'branch' in result and 'header' in result:
hexheader = result["header"]
if not self._validate_checkpoint_result(interface, result["root"],
result["branch"], hexheader, height):
# Got checkpoint validation data, failed to provide proof.
interface.logger.error("unprovable header request=%s height=%s",
request_params, height)
self._connection_down(interface.server)
return
proof_was_provided = True
else:
hexheader = result
# Simple header request.
raw_header = bfh(hexheader)
try:
_header, interface.blockchain = Blockchain.connect(height, raw_header,
proof_was_provided)
interface.logger.debug(f'Connected header at height {height:,d}')
except MissingHeader as e:
interface.logger.info(f'failed to connect header at height {height:,d}: {e}')
interface.blockchain = None
except (IncorrectBits, InsufficientPoW) as e:
interface.logger.error(f'blacklisting server for failed _on_header connect: {e}')
self._connection_down(interface.server, blacklist=True)
return
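# Mode handling below, in summary: MODE_BACKWARD steps back from a header that
# failed to connect, doubling the gap each time, until one connects;
# MODE_BINARY then bisects between the last good and bad heights to locate the
# fork point; MODE_CATCH_UP walks forward (one header or one chunk at a time)
# until the server's reported tip is reached.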
if interface.mode == Interface.MODE_BACKWARD:
if interface.blockchain:
interface.set_mode(Interface.MODE_BINARY)
interface.good = height
next_height = (interface.bad + interface.good) // 2
else:
# A backwards header request should not happen before the checkpoint
# height. It isn't requested in this context, and it isn't requested
# anywhere else. If this happens it is an error. Additionally, if the
# checkpoint height header was requested and it does not connect, then
# there's not much ElectrumSV can do about it (that we're going to
# bother). We depend on the checkpoint being relevant for the blockchain
# the user is running against.
assert height > Net.VERIFICATION_BLOCK_HEIGHT
interface.bad = height
delta = interface.tip - height
# If the longest chain does not connect at any point we check to the
# chain this interface is serving, then we fall back on the checkpoint
# height which is expected to work.
next_height = max(Net.VERIFICATION_BLOCK_HEIGHT + 1,
interface.tip - 2 * delta)
elif interface.mode == Interface.MODE_BINARY:
if interface.blockchain:
interface.good = height
else:
interface.bad = height
next_height = (interface.bad + interface.good) // 2
if next_height == interface.good:
interface.set_mode(Interface.MODE_CATCH_UP)
elif interface.mode == Interface.MODE_CATCH_UP:
if interface.blockchain is None:
# go back
interface.logger.info("cannot connect %d", height)
interface.set_mode(Interface.MODE_BACKWARD)
interface.bad = height
next_height = height - 1
else:
next_height = height + 1 if height < interface.tip else None
if next_height is None:
# exit catch_up state
interface.logger.debug('catch up done %d', interface.blockchain.height())
interface.blockchain.catch_up = None
self._switch_lagging_interface()
self._notify('updated')
elif interface.mode == Interface.MODE_DEFAULT:
interface.logger.error(f'ignored header {_header} received in default mode')
return
# If not finished, get the next header
if next_height:
if interface.mode == Interface.MODE_CATCH_UP and interface.tip > next_height:
self._request_headers(interface, next_height, 1000)
else:
self._request_header(interface, next_height)
else:
interface.set_mode(Interface.MODE_DEFAULT)
self._notify('updated')
# refresh network dialog
self._notify('interfaces')
def maintain_requests(self):
with self.interface_lock:
interfaces = list(self.interfaces.values())
for interface in interfaces:
if interface.unanswered_requests and time.time() - interface.request_time > 20:
# The last request made is still outstanding, and was over 20 seconds ago.
interface.logger.error("blockchain request timed out")
self._connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
with self.interface_lock:
interfaces = list(self.interfaces.values())
rin = [i for i in interfaces]
win = [i for i in interfaces if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
# TODO: py3, get code from e
code = None
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
# source: https://github.com/pytorch/vision/blob/master/references/detection/
import math
import time
import torch
import torchvision.models.detection.mask_rcnn
from sklearn.metrics import roc_auc_score, matthews_corrcoef
import numpy as np
from scipy.special import softmax
import sys
import nucls_model.torchvision_detection_utils.utils as utils # noqa
from nucls_model.torchvision_detection_utils.coco_utils import get_coco_api_from_dataset # noqa
from nucls_model.torchvision_detection_utils.coco_eval import CocoEvaluator # noqa
from nucls_model.MaskRCNN import MaskRCNN # noqa
import nucls_model.torchvision_detection_utils.transforms as tvdt # noqa
from nucls_model.DataLoadingUtils import _crop_all_to_fov # noqa
from nucls_model.MiscUtils import map_bboxes_using_hungarian_algorithm # noqa
# noinspection LongLine
def train_one_epoch(
model, device, optimizer, data_loader, effective_batch_size=None,
epoch=1, lr_scheduler=None, loss_weights=None,
print_freq=1, window_size=20):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ", window_size=window_size)
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
if effective_batch_size is None:
effective_batch_size = data_loader.batch_size
else:
assert effective_batch_size % data_loader.batch_size == 0
# Strategy: We pass one "subbatch" through the model, then get the loss,
# but do NOT backprop yet .. the losses from multiple subbatches are accumulated
# together so the effective batch size is bigger than what can fit on the GPU
subbatches_per_grup = int(effective_batch_size / data_loader.batch_size)
global_subbatch_count = 0
subbatch = 0
grup_loss = 0. # total
grup_losses = {} # breakdown
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
# before I forget
subbatch += 1
global_subbatch_count += 1
# move subbatch to device
images = list(image.to(device) for image in images)
if isinstance(targets[0], dict):
# faster/maskrcnn
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
else:
# pure classification (targets is a list)
targets = list(target.to(device) for target in targets)
# forward pass
loss_dict = model(images, targets)
# Multiply different loss types by a weight
if loss_weights is None:
loss_weights = {k: 1.0 for k in loss_dict}
loss_dict = {
lname: loss * loss_weights[lname]
for lname, loss in loss_dict.items()
}
# reduce losses over all GPUs
loss_dict_reduced = utils.reduce_dict(loss_dict)
# stop if nan loss
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
# print("One of the losses if nan! Replacing with zero for now.")
# print(loss_dict_reduced)
# for k, v in loss_dict_reduced.items():
# loss_dict_reduced[k][torch.isnan(v)] = 0.
# losses_reduced = sum(loss for loss in loss_dict_reduced.values())
# loss_value = losses_reduced.item()
# Clear old gradients from the last step, we already have the loss
# (else you’d accumulate gradients from all loss.backward() calls)
optimizer.zero_grad()
# update overall loss (per effective batch size)
grup_loss += loss_value
for k, v in loss_dict_reduced.items():
if k in grup_losses:
grup_losses[k] += float(v)
else:
grup_losses[k] = float(v)
# Last subbatch, time to backpropagate!
if subbatch == subbatches_per_grup:
# total loss, averaged over subbatches in this batch
losses = sum(loss for lname, loss in loss_dict.items())
losses = (losses + grup_loss - loss_value) / subbatches_per_grup
# compute the derivative of the loss w.r.t. the parameters
# (or anything requiring gradients) using backpropagation
losses.backward()
# take a step based on the gradients of the parameters
optimizer.step()
# maybe update learning rate
if lr_scheduler is not None:
lr_scheduler.step()
# get floats for logging
grup_loss /= subbatches_per_grup
grup_losses = {
k: v / subbatches_per_grup for k, v in grup_losses.items()
}
# update logger
metric_logger.update(loss=grup_loss, **grup_losses)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# reset subbatch index and effective batch losses
subbatch = 0
grup_loss = 0.
grup_losses = {k: 0. for k in grup_losses}
return metric_logger
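# The loop above accumulates losses over several "subbatches" before calling
# optimizer.step(), so the effective batch size can exceed what fits in GPU
# memory at once. A minimal, self-contained sketch of that pattern (hypothetical
# model/criterion/loader objects; not used anywhere in this module):
def _gradient_accumulation_sketch(model, criterion, optimizer, loader, effective_batch_size):
    accum_steps = max(1, effective_batch_size // loader.batch_size)
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(loader, 1):
        loss = criterion(model(inputs), targets) / accum_steps
        loss.backward()  # gradients accumulate across subbatches
        if step % accum_steps == 0:
            optimizer.step()  # one parameter update per effective batch
            optimizer.zero_grad()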
# noinspection LongLine
def _get_iou_types(model):
model_without_ddp = model
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
iou_types = ["bbox"]
# Mohamed: I edited this to refer to my MaskRCNN
if isinstance(model_without_ddp, MaskRCNN):
iou_types.append("segm")
if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
iou_types.append("keypoints")
return iou_types
def _update_classification_metrics(
metrics_dict, all_labels, rlabelcodes,
all_scores=None, output_labels=None,
codemap=None, prefix='',):
"""IMPORTANT NOTE: This assumes that all_labels start at zero and
are contiguous, and that all_scores is shaped (n_nuclei, n_classes),
where n_classes is the REAL number of classes.
"""
if len(all_labels) < 1:
return metrics_dict
pexist = all_scores is not None
if not pexist:
assert output_labels is not None
# maybe group scores from classes that belong to the same supercateg
if codemap is not None:
tmp_lbls = all_labels.copy()
tmp_scrs = np.zeros(all_scores.shape) if pexist else None
for k, v in codemap.items():
# remap labels
tmp_lbls[all_labels == k] = v
# aggregate probab. for classes to be grouped
if pexist:
tmp_scrs[:, v] += all_scores[:, k]
all_labels = tmp_lbls
all_scores = tmp_scrs
unique_classes = np.unique(all_labels).tolist()
n_classes = len(unique_classes)
if pexist:
all_preds = np.argmax(all_scores, 1)
else:
all_preds = output_labels
if n_classes > 0:
# accuracy
metrics_dict[f'{prefix}accuracy'] = np.mean(0 + (all_preds == all_labels))
# Matthews correlation coefficient
metrics_dict[f'{prefix}mcc'] = matthews_corrcoef(y_true=all_labels, y_pred=all_preds)
# Class confusions (unnormalized, just numbers)
for tcc, tc in rlabelcodes.items():
for pcc, pc in rlabelcodes.items():
coln = f'{prefix}confusion_trueClass-{tc}_predictedClass-{pc}'
keep1 = 0 + (all_labels == tcc)
keep2 = 0 + (all_preds == pcc)
metrics_dict[coln] = np.sum(0 + ((keep1 + keep2) == 2))
if n_classes > 1:
# Class-by-class accuracy
trg = np.zeros((len(all_labels), n_classes))
scr = np.zeros((len(all_labels), n_classes))
for cid, cls in enumerate(unique_classes):
cls_name = rlabelcodes[cls]
# Accuracy
tr = 0 + (all_labels == cls)
pr = 0 + (all_preds == cls)
metrics_dict[f'{prefix}accuracy_{cls_name}'] = np.mean(0 + (tr == pr))
# Matthews correlation coefficient
metrics_dict[f'{prefix}mcc_{cls_name}'] = matthews_corrcoef(y_true=tr, y_pred=pr)
# ROC AUC. Note that it's only defined for classes present in gt
if pexist:
trg[:, cid] = 0 + (all_labels == cls)
scr[:, cid] = all_scores[:, cls]
metrics_dict[f'{prefix}aucroc_{cls_name}'] = roc_auc_score(
y_true=trg[:, cid], y_score=all_scores[:, cid])
# renormalize with softmax & get rocauc
if pexist:
scr = softmax(scr, -1)
metrics_dict[f'{prefix}auroc_micro'] = roc_auc_score(
y_true=trg, y_score=scr, multi_class='ovr', average='micro')
metrics_dict[f'{prefix}auroc_macro'] = roc_auc_score(
y_true=trg, y_score=scr, multi_class='ovr', average='macro')
print(f"\nClassification results: {prefix}")
for k, v in metrics_dict.items():
if k.startswith(prefix) and ('confusion_' not in k):
print(f'{k}: {v}')
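# Example of the codemap grouping handled above (label codes hypothetical):
# codemap={0: 0, 1: 0, 2: 1} folds classes 0 and 1 into super-class 0 and class
# 2 into super-class 1, summing their probability columns before scoring.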
# noinspection PyPep8Naming,LongLine
@torch.no_grad()
def evaluate(
model, data_loader, device, maxDets=None, crop_inference_to_fov=False):
# See: https://cocodataset.org/#detection-eval
# NOTE: The coco evaluator (and what's reported in FasterRCNN and
# maskrcnn papers) combines detection and classification by
# considering something to be detected only if it's from the same
# class. E.g., if the model places a bounding box and labels it "traffic"
# light", but in reality that location has a "person", this is
# considered a false positive traffic light and a false negative
# person. We'd like to get this metric, sure, but we're also
# interested in classic detection .. i.e. just "is there a nucleus?"
# so we get AP using both the full set of classes AS WELL AS
# a remapped class set where anything is considered a "nucleus"
n_threads = torch.get_num_threads()
# mFIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
# iou_types = _get_iou_types(model)
iou_types = ['bbox'] # segmAP is meaningless in my hybrid bbox/segm dataset
maxDets = [1, 10, 100] if maxDets is None else maxDets
cropper = tvdt.Cropper() if crop_inference_to_fov else None
# combined detection & classification precision/recall
dst = data_loader.dataset
coco = get_coco_api_from_dataset(dst, crop_inference_to_fov=crop_inference_to_fov)
coco_evaluator = CocoEvaluator(coco, iou_types, maxDets=maxDets)
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# precision/recall for just detection (objectness)
classification = dst.do_classification
if classification:
# IMPORTANT: REVERSE ME AFTER DEFINING COCO API
dst.do_classification = False
dst.set_labelmaps()
metric_logger_objectness = utils.MetricLogger(delimiter=" ")
coco_objectness = get_coco_api_from_dataset(
dst, crop_inference_to_fov=crop_inference_to_fov)
coco_evaluator_objectness = CocoEvaluator(
coco_objectness, iou_types, maxDets=maxDets)
# IMPORTANT: THIS LINE IS CRITICAL
dst.do_classification = True
dst.set_labelmaps()
else:
metric_logger_objectness = None
# noinspection PyUnusedLocal
coco_objectness = None
coco_evaluator_objectness = None
n_true = 0
n_pred = 0
n_matched = 0
cltargets = []
clprobabs = []
cloutlabs = []
seg_intersects = []
seg_sums = []
def _get_categnames(prefix):
if prefix == '':
return dst.categs_names
return dst.supercategs_names
for images, targets in metric_logger.log_every(data_loader, 100, header):
images = list(img.to(device) for img in images)
targets = list(targets)
# uncomment if GPU
# torch.cuda.synchronize()
model_time = time.time()
outputs = model(images)
outputs = [
{k: v.to(cpu_device) for k, v in t.items() if v is not None}
for t in outputs
]
model_time = time.time() - model_time
if crop_inference_to_fov:
images, targets, outputs = _crop_all_to_fov(
images=images, targets=targets, outputs=outputs,
cropper=cropper)
# combined detection & classification precision/recall
res = {
target["image_id"].item(): output
for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(
model_time=model_time, evaluator_time=evaluator_time)
probabs_exist = 'probabs' in outputs[0]
if classification:
# IMPORTANT NOTE: The way that FasterRCNN is implemented
try:
num1 = float(menor[i])
num2 = float(mayor[i])
result.append(mt.div(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': exp1['columna'] + [exp2['columna'][0]]
}
return newdict
else:
# only one of them is a dict
dic = exp1 if isinstance(exp1,dict) else exp2
val = exp1 if not isinstance(exp1,dict) else exp2
valores = dic['valores']
result = []
for col in valores:
try:
num1 = float(col)
num2 = float(val)
result.append(mt.div(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': dic['columna']
}
return newdict
# neither is a dict
else:
try:
num1 = float(exp1)
num2 = float(exp2)
return mt.div(num1 , num2)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_exp(column_mathtrig):
def __init__(self,exp,alias):
self.exp = exp
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp = self.exp.ejecutar(tables)
if isinstance(exp,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
if isinstance(exp,dict):
# it is a dict
registros = exp['valores']
result = []
for reg in registros:
try:
num = int(reg)
result.append(mt.exp(num))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
exp['valores'] = result
return exp
else:
# not a dict
try:
num = int(exp)
return mt.exp(num)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
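# Every class below repeats the same dict-vs-scalar dispatch used in math_exp.
# A shared helper could centralize it; this is an illustrative sketch only and
# is not referenced by the original code:
def _apply_unary(exp, fn, cast=float):
    if isinstance(exp, CError):
        return exp
    try:
        if isinstance(exp, dict):
            exp['valores'] = [fn(cast(v)) for v in exp['valores']]
            return exp
        return fn(cast(exp))
    except ValueError:
        e = CError(0, 0, "Error en funcion matematica", 'Semantico')
        errores.insert_error(e)
        return e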
class math_factorial(column_mathtrig):
def __init__(self, exp, alias):
self.exp = exp
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp = self.exp.ejecutar(tables)
if isinstance(exp,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
if isinstance(exp,dict):
# it is a dict
registros = exp['valores']
result = []
for reg in registros:
try:
num = int(reg)
result.append(mt.factorial(num))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
exp['valores'] = result
return exp
else:
# not a dict
try:
num = int(exp)
return mt.factorial(num)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_floor(column_mathtrig):
def __init__(self, exp, alias):
self.exp = exp
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp = self.exp.ejecutar(tables)
if isinstance(exp,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
if isinstance(exp,dict):
# it is a dict
registros = exp['valores']
result = []
for reg in registros:
try:
num = float(reg)
result.append(mt.floor(num))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
exp['valores'] = result
return exp
else:
# not a dict
try:
num = float(exp)
return mt.floor(num)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_gcd(column_mathtrig):
def __init__(self, exp1, exp2, alias):
self.exp1 = exp1
self.exp2 = exp2
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp1 = self.exp1.ejecutar(tables)
exp2 = self.exp2.ejecutar(tables)
if isinstance(exp1,CError) or isinstance(exp2,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
# if at least one operand is a dict
if isinstance(exp1,dict) or isinstance(exp2,dict):
# both are dicts
if isinstance(exp1,dict) and isinstance(exp2,dict):
val1 = exp1['valores']
val2 = exp2['valores']
# find which list is shorter
menor = val1 if len(val1) < len(val2) else val2
mayor = val1 if len(val1) > len(val2) else val2
result = []
# iterate over the shorter list
for i in range(len(menor)):
try:
num1 = int(menor[i])
num2 = int(mayor[i])
result.append(mt.gcd(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': exp1['columna'] + [exp2['columna'][0]]
}
return newdict
else:
#Solo una de ellas es diccionario
dic = exp1 if isinstance(exp1,dict) else exp2
val = exp1 if not isinstance(exp1,dict) else exp2
valores = dic['valores']
result = []
for col in valores:
try:
num1 = int(col)
num2 = int(val)
result.append(mt.gcd(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': dic['columna']
}
return newdict
# neither is a dict
else:
try:
num1 = int(exp1)
num2 = int(exp2)
return mt.gcd(num1,num2)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_lcm(column_mathtrig):
def __init__(self,exp1,exp2,alias):
self.exp1 = exp1
self.exp2 = exp2
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp1 = self.exp1.ejecutar(tables)
exp2 = self.exp2.ejecutar(tables)
if isinstance(exp1,CError) or isinstance(exp2,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
# if at least one operand is a dict
if isinstance(exp1,dict) or isinstance(exp2,dict):
# both are dicts
if isinstance(exp1,dict) and isinstance(exp2,dict):
val1 = exp1['valores']
val2 = exp2['valores']
# find which list is shorter
menor = val1 if len(val1) < len(val2) else val2
mayor = val1 if len(val1) > len(val2) else val2
result = []
# iterate over the shorter list
for i in range(len(menor)):
try:
num1 = int(menor[i])
num2 = int(mayor[i])
result.append(mt.lcm(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': exp1['columna'] + [exp2['columna'][0]]
}
return newdict
else:
# only one of them is a dict
dic = exp1 if isinstance(exp1,dict) else exp2
val = exp1 if not isinstance(exp1,dict) else exp2
valores = dic['valores']
result = []
for col in valores:
try:
num1 = int(col)
num2 = int(val)
result.append(mt.lcm(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': dic['columna']
}
return newdict
# neither is a dict
else:
try:
num1 = int(exp1)
num2 = int(exp2)
return mt.lcm(num1,num2)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_ln(column_mathtrig):
def __init__(self, exp, alias):
self.exp = exp
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp = self.exp.ejecutar(tables)
if isinstance(exp,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
if isinstance(exp,dict):
# it is a dict
registros = exp['valores']
result = []
for reg in registros:
try:
num = float(reg)
result.append(mt.ln(num))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
exp['valores'] = result
return exp
else:
# not a dict
try:
num = float(exp)
return mt.ln(num)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_log(column_mathtrig):
def __init__(self, exp1, exp2, alias):
self.exp1 = exp1
self.exp2 = exp2
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp1 = self.exp1.ejecutar(tables)
exp2 = self.exp2.ejecutar(tables)
if isinstance(exp1,CError) or isinstance(exp2,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
# if at least one operand is a dict
if isinstance(exp1,dict) or isinstance(exp2,dict):
# both are dicts
if isinstance(exp1,dict) and isinstance(exp2,dict):
val1 = exp1['valores']
val2 = exp2['valores']
# find which list is shorter
menor = val1 if len(val1) < len(val2) else val2
mayor = val1 if len(val1) > len(val2) else val2
result = []
# iterate over the shorter list
for i in range(len(menor)):
try:
num1 = int(menor[i])
num2 = int(mayor[i])
result.append(mt.log(num1,num2))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
newdict = {
'valores':result,
'columna': exp1['columna'] + [exp2['columna'][0]]
}
return newdict
else:
#Solo una de ellas es diccionario
dic = exp1 if isinstance(exp1,dict) else exp2
val = exp1 if not isinstance(exp1,dict) else exp2
valores = dic['valores']
result = []
for col in valores:
num1 = int(col)
num2 = int(val)
result.append(mt.log(num1,num2))
newdict = {
'valores':result,
'columna': dic['columna']
}
return newdict
# neither is a dict
else:
try:
num1 = int(exp1)
num2 = int(exp2)
return mt.log(num1,num2)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_log10(column_mathtrig):
def __init__(self, exp, alias):
self.exp = exp
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp = self.exp.ejecutar(tables)
if isinstance(exp,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
if isinstance(exp,dict):
# it is a dict
registros = exp['valores']
result = []
for reg in registros:
try:
num = float(reg)
result.append(mt.log10(num))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
exp['valores'] = result
return exp
else:
#no es diccionario
num = float(exp)
return mt.log10(num)
class math_min_scale(column_mathtrig):
def __init__(self,exp,alias):
self.exp = exp
self.alias = alias
def ejecutar(self,tables):
# Check whether we received a dict (column) or a single value
exp = self.exp.ejecutar(tables)
if isinstance(exp,CError):
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
if isinstance(exp,dict):
#es diccionario
registros = exp['valores']
result = []
for reg in registros:
try:
num = int(reg)
result.append(mt.min_scale(num))
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
exp['valores'] = result
return exp
else:
# not a dict
try:
num = int(exp)
return mt.min_scale(num)
except ValueError:
e = CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
# Update q for Godunov update
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*apdq[m,LL-1:UL-1]
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*amdq[m,LL-1:UL-1]
elif state.problem_data['method'] == 'h_box':
# # add corrections
wave,s,amdq,apdq,f_corr_l,f_corr_r = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
LL = self.num_ghost - 1
UL = self.num_ghost + grid.num_cells[0] + 1
# Update q for Godunov update
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*(apdq[m,LL-1:UL-1] - f_corr_r[m,LL-1:UL-1])
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*(amdq[m,LL-1:UL-1] + f_corr_l[m,LL-1:UL-1])
elif state.problem_data['method'] == 'h_box_wave':
# # add corrections
state.problem_data['arrival_state'] = False
wave,s,amdq,apdq,q_hbox_initial,aux_hbox = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
LL = self.num_ghost - 1
UL = self.num_ghost + grid.num_cells[0] + 1
# Update q for Godunov update
iw = state.problem_data['wall_position'] + self.num_ghost - 1
q_last = q[:,iw:iw+2].copy()
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*apdq[m,LL-1:UL-1]
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*amdq[m,LL-1:UL-1]
# check the arrivals
q[:,iw:iw+2] = q_last[:,:] # reset the wall cells
dt = self.dt
num_waves = self.num_waves
dx = grid.delta[0] * xpxc
alpha = state.problem_data['fraction']
arrival_times = np.array([0.0])
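# A wave "arrives" when it would cross the narrow cell fraction next to the
# wall (alpha*dx on the left side, (1-alpha)*dx on the right) within this time
# step; those crossing times are collected so the h-box states can be re-solved
# at each arrival further below.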
for mw in xrange(num_waves):
if (s[mw,iw-1] > 0 and (s[mw,iw-1] * dt > alpha * dx)):
arrival_times = np.append(arrival_times, alpha * dx / s[mw,iw-1])
if (s[mw,iw+1] < 0 and ( (-s[mw,iw+1]) * dt > (1 - alpha) * dx ) ):
arrival_times = np.append(arrival_times, -(1 - alpha) * dx / s[mw,iw+1])
arrival_times.sort()
n_arrival_times = len(arrival_times)
if n_arrival_times == 1 :
state.problem_data['arrival_state'] = False
else:
state.problem_data['arrival_state'] = True
s_cells = np.zeros((num_waves, 3, n_arrival_times))
s_cells[:,:,0] = s[:, iw-1:iw+2].copy()
wave_cells = np.zeros((num_eqn, num_waves, 3, n_arrival_times))
wave_cells[:,:,:,0] = wave[:,:,iw-1:iw+2].copy()
if state.problem_data['arrival_state'] == False:
q[:,iw] -= dt/(alpha * dx) * apdq[:,iw-1]
q[:,iw+1] -= dt/((1 - alpha)*dx) * amdq[:,iw+1]
for mw in xrange(num_waves):
if (s[mw,iw] < 0):
q[:,iw-1] -= dt/dx * ( max(0, -s[mw,iw] * dt - alpha * dx) / (-s[mw,iw] * dt) * wave[:,mw,iw] )
q[:,iw] -= dt/(alpha * dx) * (min(-s[mw,iw] * dt, alpha * dx) / (-s[mw,iw] * dt) * wave[:,mw,iw] )
elif (s[mw,iw] > 0):
q[:,iw+1] -= dt/((1 - alpha)*dx) * (min(s[mw,iw] * dt, (1 - alpha) * dx) / (s[mw,iw] * dt) * wave[:,mw,iw] )
q[:,iw+2] -= dt/dx * ( max(0, s[mw,iw] * dt - (1 - alpha) * dx) / (s[mw,iw] * dt) * wave[:,mw,iw] )
if state.problem_data['arrival_state'] == True:
## update q_hbox
for i in xrange(1, n_arrival_times):
q_hbox = q_hbox_initial.copy()
for mw in xrange(num_waves):
if s[mw,iw-2] > 0:
q_hbox[:,0] -= arrival_times[i] / dx * (max(0, s[mw,iw-2] * arrival_times[i] - alpha * dx) / (s[mw,iw-2] * arrival_times[i]) * wave[:,mw,iw-2])
if s[mw, iw-1] < 0:
q_hbox[:,0] -= arrival_times[i] / dx * (min(-s[mw,iw-1] * arrival_times[i], (1 - alpha) * dx) / (-s[mw,iw-1] * arrival_times[i]) * wave[:,mw,iw-1])
if s_cells[mw,0,i] > 0:
for j in xrange(i):
q_hbox[:,0] -= (arrival_times[j+1] - arrival_times[j]) / dx * (wave_cells[:,mw,0,j])
if s_cells[mw,0,i] * arrival_times[i] > alpha * dx - 1e-14:
# check the arrival wave
wave_cells[:,mw,0,i] = 0.0
if s_cells[mw,1,i] < 0:
for j in xrange(i):
q_hbox[:,0] -= (arrival_times[i] - arrival_times[j]) / dx * (wave_cells[:,mw,1,j])
if s_cells[mw,1,i] > 0:
for j in xrange(i):
q_hbox[:,1] -= (arrival_times[i] - arrival_times[j]) / dx * (wave_cells[:,mw,1,j])
if s_cells[mw,2,i] < 0:
for j in xrange(i):
q_hbox[:,1] -= (arrival_times[j+1] - arrival_times[j]) / dx * (wave_cells[:,mw,2,j])
if (-s_cells[mw,2,i] * arrival_times[i]) > (1 - alpha) * dx - 1e-14:
# check the arrival wave
wave_cells[:,mw,2,i] = 0.0
if s[mw,iw+1] > 0:
q_hbox[:,1] -= arrival_times[i] / dx * (min(s[mw,iw+1] * arrival_times[i], alpha * dx) / (s[mw,iw+1] * arrival_times[i]) * wave[:,mw,iw+1])
if s[mw,iw+2] < 0:
q_hbox[:,1] -= arrival_times[i] / dx * (max(0, -s[mw,iw+2] * arrival_times[i] - (1 - alpha) * dx) / (-s[mw,iw+2] * arrival_times[i]) * wave[:,mw,iw+2])
wave_cells[:,:,1,i],s_cells[:,1,i],amdq_arr,apdq_arr = self.rp(q_hbox[:,0],q_hbox[:,1],aux_hbox[:,0],aux_hbox[:,1],state.problem_data)
## update q[iw-1], q[iw], q[iw+1] and q[iw+2]
arrival_times = np.append(arrival_times, dt)
n_arrival_times = len(arrival_times)
for mw in xrange(num_waves):
for i in xrange(n_arrival_times-1):
if s_cells[mw,0,i] > 0:
q[:,iw] -= (arrival_times[i+1] - arrival_times[i]) / (alpha * dx) * (wave_cells[:,mw,0,i])
if s_cells[mw,2,i] < 0:
q[:,iw+1] -= (arrival_times[i+1] - arrival_times[i]) / ((1 - alpha) * dx) * (wave_cells[:,mw,2,i])
if s_cells[mw,1,i] < 0:
q[:,iw-1] -= (dt - arrival_times[i]) / dx * ( max(0, -s_cells[mw,1,i] * (dt - arrival_times[i]) - alpha * dx) / (-s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
q[:,iw] -= (dt - arrival_times[i]) / (alpha * dx) * ( min(-s_cells[mw,1, i] * (dt - arrival_times[i]), alpha * dx) / (-s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
if s_cells[mw,1,i] > 0:
q[:,iw+1] -= (dt - arrival_times[i]) / ((1 - alpha) * dx) * ( min(s_cells[mw,1, i] * (dt - arrival_times[i]), (1 - alpha) * dx) / (s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
q[:,iw+2] -= (dt - arrival_times[i]) / dx * ( max(0, s_cells[mw,1,i] * (dt - arrival_times[i]) - (1- alpha) * dx) / (s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
# Compute maximum wave speed
# add additional conditions for h-box
cfl = 0.0
if 'method' not in state.problem_data:
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
elif state.problem_data['method'] == 'h_box':
# print("h_box corrected dtdx")
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx_hbox[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx_hbox[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
elif state.problem_data['method'] == 'h_box_wave':
# print("h_box corrected dtdx")
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx_hbox[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx_hbox[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
# If we are doing slope limiting we have more work to do
if self.order == 2:
# Initialize flux corrections
f = np.zeros( (num_eqn,grid.num_cells[0] + 2*self.num_ghost) )
# Apply Limiters to waves
if (limiter > 0).any():
wave = tvd.limit(state.num_eqn,wave,s,limiter,dtdx)
# Compute correction fluxes for second order q_{xx} terms
dtdxave = 0.5 * (dtdx[LL-1:UL-1] + dtdx[LL:UL])
if self.fwave:
for mw in xrange(wave.shape[1]):
sabs = np.abs(s[mw,LL-1:UL-1])
om = 1.0 - sabs*dtdxave[:UL-LL]
ssign = np.sign(s[mw,LL-1:UL-1])
for m in xrange(num_eqn):
f[m,LL:UL] += 0.5 * ssign * om * wave[m,mw,LL-1:UL-1]
else:
for mw in xrange(wave.shape[1]):
sabs = np.abs(s[mw,LL-1:UL-1])
om = 1.0 - sabs*dtdxave[:UL-LL]
for m in xrange(num_eqn):
f[m,LL:UL] += 0.5 * sabs * om * wave[m,mw,LL-1:UL-1]
# Update q by differencing correction fluxes
for m in xrange(num_eqn):
q[m,LL:UL-1] -= dtdx[LL:UL-1] * (f[m,LL+1:UL] - f[m,LL:UL-1])
else: raise Exception("Unrecognized kernel_language; choose 'Fortran' or 'Python'")
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(num_ghost,self.auxbc)
# ============================================================================
# ClawPack 2d Solver Class
# ============================================================================
class ClawSolver2D(ClawSolver):
r"""
2D Classic (Clawpack) solver.
Solve using the wave propagation algorithms of R. J. LeVeque's
Clawpack code (www.clawpack.org).
In addition to the attributes of ClawSolver1D, ClawSolver2D
also has the following options:
.. attribute:: dimensional_split
If True, use dimensional splitting (Godunov splitting).
Dimensional splitting with Strang splitting is not supported
at present but could easily be enabled if necessary.
If False, use unsplit Clawpack algorithms, possibly including
transverse Riemann solves.
.. attribute:: transverse_waves
If dimensional_split is True, this option has no effect. If
dimensional_split is False, then transverse_waves should be one of
the following values:
ClawSolver2D.no_trans: Transverse Riemann solver
not used. The stable CFL for this algorithm is 0.5. Not recommended.
ClawSolver2D.trans_inc: Transverse increment waves are computed
and propagated.
ClawSolver2D.trans_cor: Transverse increment waves and transverse
correction waves are computed and propagated.
Note that only the fortran routines are supported for now in 2D.
"""
__doc__ += add_parent_doc(ClawSolver)
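# Typical configuration (illustrative; follows standard PyClaw usage):
#     solver = ClawSolver2D(riemann_solver)
#     solver.dimensional_split = False          # use the unsplit algorithm
#     solver.transverse_waves = ClawSolver2D.trans_cor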
no_trans = 0
trans_inc = 1
trans_cor = 2
def __init__(self,riemann_solver=None, claw_package=None):
r"""
Create 2d Clawpack solver
See :class:`ClawSolver2D` for more info.
"""
self.dimensional_split = True
self.transverse_waves = self.trans_inc
self.num_dim = 2
self.reflect_index = [1,2]
self.aux1 = None
self.aux2 = None
self.aux3 = None
self.work = None
super(ClawSolver2D,self).__init__(riemann_solver, claw_package)
def _check_cfl_settings(self):
if (not self.dimensional_split) and (self.transverse_waves==0):
cfl_recommended = 0.5
else:
cfl_recommended = 1.0
if self.cfl_max > cfl_recommended:
import warnings
warnings.warn('cfl_max is set higher than the recommended value of %s' % cfl_recommended)
warnings.warn(str(self.cfl_desired))
def _allocate_workspace(self,solution):
r"""
Pack parameters into format recognized by Clawpack (Fortran) code.
Sets the method array and the cparam common block for the Riemann solver.
"""
import numpy as np
state = solution.state
num_eqn,num_aux,num_waves,num_ghost,aux = state.num_eqn,state.num_aux,self.num_waves,self.num_ghost,state.aux
#The following is a hack to work around an issue
#with f2py. It involves wastefully allocating three arrays.
#f2py seems not able to handle multiple zero-size arrays being passed.
# it appears the bug is related to f2py/src/fortranobject.c line 841.
if aux is None: num_aux=1
grid = state.grid
maxmx,maxmy = grid.num_cells[0],grid.num_cells[1]
maxm = max(maxmx, maxmy)
import datetime
from onecodex.exceptions import OneCodexException
class set_style(object):
"""Inserts a <style> block in <head>. Used to override default styling.
Parameters
----------
style : `string`
CSS to override default styling of the entire report.
"""
def __init__(self, style):
if not style.startswith("\n"):
style = "\n" + style
if not style.endswith("\n"):
style += "\n"
self.style = style
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
block = '\n<style type="text/css">{}</style>\n'.format(self.style)
return {"text/css": block}, {"onecodex": "head.style"}
class set_center_header(object):
"""Inserts text in the center of the header at the top of every page of the report.
Parameters
----------
text : `string`
Centered text to display to the top of every page.
style : `string`, optional
CSS to override default styling.
"""
def __init__(self, text, style=None):
self.text = text
self.style = "" if style is None else style
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
return {
"text/html": '<div id="centerheader" style="{}">{}</div>'.format(self.style, self.text)
}
class set_date(object):
"""Set report-wide date, overriding default (today).
Parameters
----------
date : `string`
Date to use in page footers and cover pages. Defaults to today's date, formatted like "January 1, 1900".
style : `string`, optional
CSS to override default styling.
"""
def __init__(self, date=None, style=None):
self.date = (
"{dt:%B} {dt.day}, {dt:%Y}".format(dt=datetime.datetime.now()) if date is None else date
)
self.style = "" if style is None else style
try:
ipy = get_ipython()
ipy.meta["customdate"] = self.date
except NameError:
pass
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
return (
{"text/html": '<div id="reportdate" style="{}">{}</div>'.format(self.style, self.date)},
{"onecodex": "customdate"},
)
class title(object):
"""Insert an <h2> title block. Used for either section or report titles.
Parameters
----------
text : `string`
Text to insert into <h2> block.
style : `string`, optional
CSS to override default styling.
"""
def __init__(self, text, style=None):
self.text = text
self.style = "" if style is None else style
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
return {"text/html": '<h2 class="title" style="{}">{}</h2>'.format(self.style, self.text)}
class set_logo(object):
"""Place a custom logo at the top of every page of the report.
Parameters
----------
url : `string`
Path to the custom report logo as a URL. If a local file, use file://.
position : {'left', 'center', 'right'}, optional
Where to place the custom logo. Default is the top-left corner of every page.
style : `string`, optional
CSS to override default styling.
"""
def __init__(self, url, position="left", style=None):
self.url = url
self.style = "" if style is None else style
if position == "left":
self.classes = "logo-left"
elif position == "center":
self.classes = "logo-center"
elif position == "right":
self.classes = "logo-right"
else:
raise OneCodexException("position must be one of: left, right, center")
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
return {
"text/html": '<img src="{}" width="120px" class="logo {}" style="{}" />'.format(
self.url, self.classes, self.style
)
}
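# Illustrative usage sketch, not part of the original module: how the header
# helpers defined above are typically displayed from a notebook cell. Assumes an
# IPython environment; the CSS, logo URL, and text below are placeholder values.
def _example_report_header():  # pragma: no cover - illustrative only
    set_style("#reportdate { color: #888888; }").display()
    set_logo("file:///tmp/logo.png", position="right").display()
    set_center_header("Example Report").display()
    set_date("April 1, 2020").display()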
class legend(object):
"""Add a figure legend. Call this after generating a figure.
Parameters
----------
text : `string`
The meat of the figure legend.
heading : `string`, optional
Bolded text to appear before the meat of the legend.
fignum : `string` or `integer`, optional
The number of this figure. If not specified, will auto-increment every time this method is
called, starting with Figure 1.
style : `string`, optional
CSS to override default styling.
Notes
-----
Figure legend text looks something like this:
<b>Figure {fignum}. {heading}</b> {text}
The figure number will be auto-incremented every time legend() is called, but can be overridden
by passing the fignum kwarg.
"""
def __init__(self, text, heading=None, fignum=None, style=None):
self.heading = "" if heading is None else "{} ".format(heading)
self.text = text
self.style = "" if style is None else style
if fignum is None:
try:
ipy = get_ipython()
self.fignum = ipy.meta.get("figure_count", 0) + 1
except NameError:
raise OneCodexException("Must be run from within IPython")
ipy.meta["figure_count"] = self.fignum
else:
self.fignum = fignum
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
return {
"text/html": '<div class="figurelegend" style="{}"><b>Figure {}. {}</b>{}</div>'.format(
self.style, self.fignum, self.heading, self.text
)
}
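# Illustrative usage sketch, not part of the original module: legend() reads the
# running figure count from IPython's meta store, so it must be called from a
# notebook. The legend text below is placeholder content.
def _example_figure_legends():  # pragma: no cover - illustrative only
    # The first call renders "Figure 1.", the next "Figure 2.", and so on...
    legend("Relative abundance of the top ten genera.", heading="Sample overview.").display()
    # ...unless an explicit figure number is supplied.
    legend("Read counts per sample.", fignum=10).display()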
def reference(text=None, label=None):
"""Add a reference to the bibliography and insert a superscript number.
Parameters
----------
text : `string`, optional
The complete text of the reference, e.g. Roo, et al. "How to Python." Nature, 2019.
label : `string`, optional
A short label to describe this reference.
Notes
-----
1) Every time reference() is called, the reference number is auto-incremented. That is, the first
time you call this, a superscript 1 is inserted. The next time you call this (with a different
reference), a superscript 2 is inserted.
2) This function returns HTML. It is meant to be used inside a Markdown cell in your IPython
notebook, or concatenated together with another string that's used as input to a function
here in the `report` module.
Examples
--------
You want to insert a reference at the current position, and store it using a short label so you
can access it again without typing the entire reference text.
>>> reference('Roo, et al. "How to Python." Nature, 2019.', 'roo1')
'<sup class="reference">1</sup>'
The next time you want to insert this reference, just use the short 'roo1' label.
>>> reference(label='roo1')
'<sup class="reference">1</sup>'
You want to insert a list of references in a single IPython cell, each with a short label, and
use them all later without displaying any of the superscript reference numbers now.
_ = reference('Roo, et al. "How to Python." Nature, 2019.', 'roo1')
_ = reference('Roo, et al. "The Tao of Roo." Random House, 2018.', 'roo2')
_ = reference('Roo, et al. "Roo and the Art of Database Maintenance." N/A, 2017.', 'roo3')
~~~ And later, in a Markdown cell in your IPython notebook ~~~
As Roo, et al. outlined in a previous work{reference(label='roo2')}, all play and no work
makes for a happy dog. Later, the same authors applied similar reasoning to the care of
Burmese Pythons{reference(label='roo1')}. By comparing the care of dogs and Pythons to
SQL databases, Roo, et al. make a compelling argument for why writing docstrings can be fun
and not just a chore{reference(label='roo3')}.
You want to insert a reference into a figure legend, using `report.legend`.
report.legend(
'As you can clearly see in the above figure, the data supports my conclusion '
'and does not support the conclusion of my peers{reference(label='similar_paper1')}. '
'This is most likely because I am smarter and more attractive than the authors of '
'those other publications{reference(label='ego_and_insecurity1')}.'
)
"""
if text is None and label is None:
raise OneCodexException("Please specify at least one of: text, label")
try:
ipy = get_ipython()
ref_list = ipy.meta.get("references", {})
except NameError:
raise OneCodexException("Must be run from within IPython")
def to_html(ref_num):
return '<sup class="reference">{}</sup>'.format(ref_num)
if text is not None:
# has this reference already been cited?
for ref_label, (ref_num, ref_text) in ref_list.items():
if text == ref_text:
if label is not None and label != ref_label:
raise OneCodexException(
"Citation already in use with label={}".format(ref_label)
)
else:
break
else:
# reference has not been cited. is the label already in use?
if label is not None and label in ref_list.keys():
raise OneCodexException("Citation label={} already in use".format(label))
# create the citation and assign next number
if not ref_list:
ref_num = 1
else:
ref_num = max([x[0] for x in ref_list.values()]) + 1
if label is None:
ref_label = ref_num
else:
ref_label = label
ref_list[ref_label] = (ref_num, text)
ipy.meta["references"] = ref_list
return to_html(ref_num)
elif label is not None:
if label not in ref_list.keys():
raise OneCodexException("Cannot find citation with label={}".format(label))
return to_html(ref_list[label][0])
class bibliography(object):
"""Adds a bibliography containing all references cited using `report.reference`.
Parameters
----------
style : `string`, optional
CSS to override default styling.
"""
def __init__(self, style=None):
self.style = "" if style is None else style
try:
ipy = get_ipython()
ref_list = ipy.meta.get("references", {})
except NameError:
raise OneCodexException("Must be run from within IPython")
self.ref_list = ref_list
def display(self):
from IPython.display import display
display(self)
def _repr_mimebundle_(self, include=None, exclude=None):
cites = [
"<div>" "<h4>References</h4>",
'<dl class="bibliography" style="{}">'.format(self.style),
]
for ref_label, (ref_num, ref_text) in self.ref_list.items():
cites.append("<dt>{}</dt>".format(ref_num))
cites.append("<dd>{}</dd>".format(ref_text))
cites.append("</dl>")
| |
12:52:00,8.95,632.0,9.35
3189,11,4986.0,532,Travel and Other,1970-01-01 12:52:00,6.76,517.0,7.65
3190,4,1962.0,532,Child Care,1970-01-01 12:52:00,2.66,134.0,1.98
3191,5,449.0,532,Adult Care,1970-01-01 12:52:00,0.61,119.0,1.76
3192,6,32681.0,533,Work and Education,1970-01-01 12:53:00,44.31,2945.0,43.58
3193,10,27095.0,533,Leisure,1970-01-01 12:53:00,36.73,2418.0,35.78
3194,3,6601.0,533,Housework,1970-01-01 12:53:00,8.95,630.0,9.32
3195,11,4974.0,533,Travel and Other,1970-01-01 12:53:00,6.74,511.0,7.56
3196,4,1956.0,533,Child Care,1970-01-01 12:53:00,2.65,135.0,2.0
3197,5,451.0,533,Adult Care,1970-01-01 12:53:00,0.61,119.0,1.76
3198,6,32700.0,534,Work and Education,1970-01-01 12:54:00,44.33,2946.0,43.59
3199,10,27070.0,534,Leisure,1970-01-01 12:54:00,36.7,2418.0,35.78
3200,3,6605.0,534,Housework,1970-01-01 12:54:00,8.95,623.0,9.22
3201,11,4964.0,534,Travel and Other,1970-01-01 12:54:00,6.73,518.0,7.66
3202,4,1965.0,534,Child Care,1970-01-01 12:54:00,2.66,134.0,1.98
3203,5,454.0,534,Adult Care,1970-01-01 12:54:00,0.62,119.0,1.76
3204,6,32730.0,535,Work and Education,1970-01-01 12:55:00,44.37,2949.0,43.64
3205,10,27067.0,535,Leisure,1970-01-01 12:55:00,36.7,2418.0,35.78
3206,3,6601.0,535,Housework,1970-01-01 12:55:00,8.95,624.0,9.23
3207,11,4945.0,535,Travel and Other,1970-01-01 12:55:00,6.7,510.0,7.55
3208,4,1962.0,535,Child Care,1970-01-01 12:55:00,2.66,134.0,1.98
3209,5,453.0,535,Adult Care,1970-01-01 12:55:00,0.61,123.0,1.82
3210,6,33120.0,536,Work and Education,1970-01-01 12:56:00,44.9,2981.0,44.11
3211,10,26730.0,536,Leisure,1970-01-01 12:56:00,36.24,2371.0,35.08
3212,3,6620.0,536,Housework,1970-01-01 12:56:00,8.98,621.0,9.19
3213,11,4866.0,536,Travel and Other,1970-01-01 12:56:00,6.6,528.0,7.81
3214,4,1964.0,536,Child Care,1970-01-01 12:56:00,2.66,134.0,1.98
3215,5,458.0,536,Adult Care,1970-01-01 12:56:00,0.62,123.0,1.82
3216,6,33144.0,537,Work and Education,1970-01-01 12:57:00,44.94,2984.0,44.16
3217,10,26718.0,537,Leisure,1970-01-01 12:57:00,36.22,2372.0,35.1
3218,3,6626.0,537,Housework,1970-01-01 12:57:00,8.98,620.0,9.17
3219,11,4859.0,537,Travel and Other,1970-01-01 12:57:00,6.59,525.0,7.77
3220,4,1958.0,537,Child Care,1970-01-01 12:57:00,2.65,134.0,1.98
3221,5,453.0,537,Adult Care,1970-01-01 12:57:00,0.61,123.0,1.82
3222,6,33187.0,538,Work and Education,1970-01-01 12:58:00,44.99,2988.0,44.21
3223,10,26689.0,538,Leisure,1970-01-01 12:58:00,36.18,2368.0,35.04
3224,3,6622.0,538,Housework,1970-01-01 12:58:00,8.98,623.0,9.22
3225,11,4852.0,538,Travel and Other,1970-01-01 12:58:00,6.58,523.0,7.74
3226,4,1958.0,538,Child Care,1970-01-01 12:58:00,2.65,135.0,2.0
3227,5,450.0,538,Adult Care,1970-01-01 12:58:00,0.61,121.0,1.79
3228,6,33214.0,539,Work and Education,1970-01-01 12:59:00,45.03,2991.0,44.26
3229,10,26671.0,539,Leisure,1970-01-01 12:59:00,36.16,2369.0,35.05
3230,3,6626.0,539,Housework,1970-01-01 12:59:00,8.98,623.0,9.22
3231,11,4839.0,539,Travel and Other,1970-01-01 12:59:00,6.56,518.0,7.66
3232,4,1955.0,539,Child Care,1970-01-01 12:59:00,2.65,134.0,1.98
3233,5,453.0,539,Adult Care,1970-01-01 12:59:00,0.61,123.0,1.82
3234,6,33256.0,540,Work and Education,1970-01-01 13:00:00,45.09,2998.0,44.36
3235,10,26666.0,540,Leisure,1970-01-01 13:00:00,36.15,2360.0,34.92
3236,3,6625.0,540,Housework,1970-01-01 13:00:00,8.98,625.0,9.25
3237,11,4805.0,540,Travel and Other,1970-01-01 13:00:00,6.51,519.0,7.68
3238,4,1954.0,540,Child Care,1970-01-01 13:00:00,2.65,134.0,1.98
3239,5,452.0,540,Adult Care,1970-01-01 13:00:00,0.61,122.0,1.81
3240,6,33524.0,541,Work and Education,1970-01-01 13:01:00,45.45,2995.0,44.32
3241,10,24670.0,541,Leisure,1970-01-01 13:01:00,33.45,2222.0,32.88
3242,11,6642.0,541,Travel and Other,1970-01-01 13:01:00,9.01,662.0,9.8
3243,3,6553.0,541,Housework,1970-01-01 13:01:00,8.88,606.0,8.97
3244,4,1925.0,541,Child Care,1970-01-01 13:01:00,2.61,149.0,2.2
3245,5,444.0,541,Adult Care,1970-01-01 13:01:00,0.6,124.0,1.83
3246,6,33572.0,542,Work and Education,1970-01-01 13:02:00,45.52,2999.0,44.38
3247,10,24678.0,542,Leisure,1970-01-01 13:02:00,33.46,2227.0,32.95
3248,11,6580.0,542,Travel and Other,1970-01-01 13:02:00,8.92,652.0,9.65
3249,3,6569.0,542,Housework,1970-01-01 13:02:00,8.91,605.0,8.95
3250,4,1925.0,542,Child Care,1970-01-01 13:02:00,2.61,149.0,2.2
3251,5,434.0,542,Adult Care,1970-01-01 13:02:00,0.59,126.0,1.86
3252,6,33633.0,543,Work and Education,1970-01-01 13:03:00,45.6,3004.0,44.45
3253,10,24734.0,543,Leisure,1970-01-01 13:03:00,33.53,2226.0,32.94
3254,3,6575.0,543,Housework,1970-01-01 13:03:00,8.91,606.0,8.97
3255,11,6458.0,543,Travel and Other,1970-01-01 13:03:00,8.76,644.0,9.53
3256,4,1918.0,543,Child Care,1970-01-01 13:03:00,2.6,151.0,2.23
3257,5,440.0,543,Adult Care,1970-01-01 13:03:00,0.6,127.0,1.88
3258,6,33672.0,544,Work and Education,1970-01-01 13:04:00,45.65,3005.0,44.47
3259,10,24764.0,544,Leisure,1970-01-01 13:04:00,33.57,2228.0,32.97
3260,3,6591.0,544,Housework,1970-01-01 13:04:00,8.94,605.0,8.95
3261,11,6385.0,544,Travel and Other,1970-01-01 13:04:00,8.66,641.0,9.49
3262,4,1912.0,544,Child Care,1970-01-01 13:04:00,2.59,150.0,2.22
3263,5,434.0,544,Adult Care,1970-01-01 13:04:00,0.59,129.0,1.91
3264,6,33728.0,545,Work and Education,1970-01-01 13:05:00,45.73,3010.0,44.54
3265,10,24780.0,545,Leisure,1970-01-01 13:05:00,33.6,2235.0,33.07
3266,3,6589.0,545,Housework,1970-01-01 13:05:00,8.93,604.0,8.94
3267,11,6322.0,545,Travel and Other,1970-01-01 13:05:00,8.57,631.0,9.34
3268,4,1910.0,545,Child Care,1970-01-01 13:05:00,2.59,150.0,2.22
3269,5,429.0,545,Adult Care,1970-01-01 13:05:00,0.58,128.0,1.89
3270,6,34034.0,546,Work and Education,1970-01-01 13:06:00,46.14,3037.0,44.94
3271,10,24925.0,546,Leisure,1970-01-01 13:06:00,33.79,2246.0,33.23
3272,3,6632.0,546,Housework,1970-01-01 13:06:00,8.99,603.0,8.92
3273,11,5800.0,546,Travel and Other,1970-01-01 13:06:00,7.86,594.0,8.79
3274,4,1928.0,546,Child Care,1970-01-01 13:06:00,2.61,152.0,2.25
3275,5,439.0,546,Adult Care,1970-01-01 13:06:00,0.6,126.0,1.86
3276,6,34066.0,547,Work and Education,1970-01-01 13:07:00,46.19,3040.0,44.98
3277,10,24916.0,547,Leisure,1970-01-01 13:07:00,33.78,2242.0,33.18
3278,3,6645.0,547,Housework,1970-01-01 13:07:00,9.01,601.0,8.89
3279,11,5765.0,547,Travel and Other,1970-01-01 13:07:00,7.82,596.0,8.82
3280,4,1928.0,547,Child Care,1970-01-01 13:07:00,2.61,152.0,2.25
3281,5,438.0,547,Adult Care,1970-01-01 13:07:00,0.59,127.0,1.88
3282,6,34078.0,548,Work and Education,1970-01-01 13:08:00,46.2,3047.0,45.09
3283,10,24942.0,548,Leisure,1970-01-01 13:08:00,33.82,2248.0,33.26
3284,3,6646.0,548,Housework,1970-01-01 13:08:00,9.01,594.0,8.79
3285,11,5723.0,548,Travel and Other,1970-01-01 13:08:00,7.76,594.0,8.79
3286,4,1928.0,548,Child Care,1970-01-01 13:08:00,2.61,151.0,2.23
3287,5,441.0,548,Adult Care,1970-01-01 13:08:00,0.6,124.0,1.83
3288,6,34096.0,549,Work and Education,1970-01-01 13:09:00,46.23,3049.0,45.12
3289,10,24957.0,549,Leisure,1970-01-01 13:09:00,33.84,2247.0,33.25
3290,3,6641.0,549,Housework,1970-01-01 13:09:00,9.0,591.0,8.75
3291,11,5694.0,549,Travel and Other,1970-01-01 13:09:00,7.72,596.0,8.82
3292,4,1929.0,549,Child Care,1970-01-01 13:09:00,2.62,151.0,2.23
3293,5,441.0,549,Adult Care,1970-01-01 13:09:00,0.6,124.0,1.83
3294,6,34102.0,550,Work and Education,1970-01-01 13:10:00,46.23,3049.0,45.12
3295,10,24989.0,550,Leisure,1970-01-01 13:10:00,33.88,2244.0,33.21
3296,3,6645.0,550,Housework,1970-01-01 13:10:00,9.01,592.0,8.76
3297,11,5651.0,550,Travel and Other,1970-01-01 13:10:00,7.66,596.0,8.82
3298,4,1935.0,550,Child Care,1970-01-01 13:10:00,2.62,152.0,2.25
3299,5,436.0,550,Adult Care,1970-01-01 13:10:00,0.59,125.0,1.85
3300,6,34552.0,551,Work and Education,1970-01-01 13:11:00,46.85,3097.0,45.83
3301,10,25040.0,551,Leisure,1970-01-01 13:11:00,33.95,2225.0,32.92
3302,3,6600.0,551,Housework,1970-01-01 13:11:00,8.95,612.0,9.06
3303,11,5154.0,551,Travel and Other,1970-01-01 13:11:00,6.99,552.0,8.17
3304,4,1958.0,551,Child Care,1970-01-01 13:11:00,2.65,141.0,2.09
3305,5,454.0,551,Adult Care,1970-01-01 13:11:00,0.62,131.0,1.94
3306,6,34556.0,552,Work and Education,1970-01-01 13:12:00,46.85,3100.0,45.87
3307,10,25052.0,552,Leisure,1970-01-01 13:12:00,33.97,2225.0,32.92
3308,3,6594.0,552,Housework,1970-01-01 13:12:00,8.94,611.0,9.04
3309,11,5143.0,552,Travel and Other,1970-01-01 13:12:00,6.97,551.0,8.15
3310,4,1957.0,552,Child Care,1970-01-01 13:12:00,2.65,140.0,2.07
3311,5,456.0,552,Adult Care,1970-01-01 13:12:00,0.62,131.0,1.94
3312,6,34572.0,553,Work and Education,1970-01-01 13:13:00,46.87,3101.0,45.89
3313,10,25037.0,553,Leisure,1970-01-01 13:13:00,33.94,2216.0,32.79
3314,3,6591.0,553,Housework,1970-01-01 13:13:00,8.94,615.0,9.1
3315,11,5152.0,553,Travel and Other,1970-01-01 13:13:00,6.99,553.0,8.18
3316,4,1952.0,553,Child Care,1970-01-01 13:13:00,2.65,141.0,2.09
3317,5,454.0,553,Adult Care,1970-01-01 13:13:00,0.62,132.0,1.95
3318,6,34585.0,554,Work and Education,1970-01-01 13:14:00,46.89,3100.0,45.87
3319,10,25056.0,554,Leisure,1970-01-01 13:14:00,33.97,2212.0,32.73
3320,3,6581.0,554,Housework,1970-01-01 13:14:00,8.92,616.0,9.12
3321,11,5121.0,554,Travel and Other,1970-01-01 13:14:00,6.94,555.0,8.21
3322,4,1957.0,554,Child Care,1970-01-01 13:14:00,2.65,143.0,2.12
3323,5,458.0,554,Adult Care,1970-01-01 13:14:00,0.62,132.0,1.95
3324,6,34607.0,555,Work and Education,1970-01-01 13:15:00,46.92,3104.0,45.93
3325,10,25052.0,555,Leisure,1970-01-01 13:15:00,33.97,2207.0,32.66
3326,3,6580.0,555,Housework,1970-01-01 13:15:00,8.92,619.0,9.16
3327,11,5109.0,555,Travel and Other,1970-01-01 13:15:00,6.93,555.0,8.21
3328,4,1952.0,555,Child Care,1970-01-01 13:15:00,2.65,143.0,2.12
3329,5,458.0,555,Adult Care,1970-01-01 13:15:00,0.62,130.0,1.92
3330,6,35278.0,556,Work and Education,1970-01-01 13:16:00,47.83,3127.0,46.27
3331,10,24339.0,556,Leisure,1970-01-01 13:16:00,33.0,2179.0,32.24
3332,3,6598.0,556,Housework,1970-01-01 13:16:00,8.95,622.0,9.2
3333,11,5100.0,556,Travel and Other,1970-01-01 13:16:00,6.91,555.0,8.21
3334,4,1972.0,556,Child Care,1970-01-01 13:16:00,2.67,143.0,2.12
3335,5,471.0,556,Adult Care,1970-01-01 13:16:00,0.64,132.0,1.95
3336,6,35294.0,557,Work and Education,1970-01-01 13:17:00,47.85,3130.0,46.32
3337,10,24341.0,557,Leisure,1970-01-01 13:17:00,33.0,2188.0,32.38
3338,3,6604.0,557,Housework,1970-01-01 13:17:00,8.95,619.0,9.16
3339,11,5080.0,557,Travel and Other,1970-01-01 13:17:00,6.89,548.0,8.11
3340,4,1969.0,557,Child Care,1970-01-01 13:17:00,2.67,145.0,2.15
3341,5,470.0,557,Adult Care,1970-01-01 13:17:00,0.64,128.0,1.89
3342,6,35319.0,558,Work and Education,1970-01-01 13:18:00,47.88,3133.0,46.36
3343,10,24350.0,558,Leisure,1970-01-01 13:18:00,33.01,2190.0,32.41
3344,3,6589.0,558,Housework,1970-01-01 13:18:00,8.93,617.0,9.13
3345,11,5073.0,558,Travel and Other,1970-01-01 13:18:00,6.88,547.0,8.09
3346,4,1965.0,558,Child Care,1970-01-01 13:18:00,2.66,144.0,2.13
3347,5,462.0,558,Adult Care,1970-01-01 13:18:00,0.63,127.0,1.88
3348,6,35330.0,559,Work and Education,1970-01-01 13:19:00,47.9,3135.0,46.39
3349,10,24343.0,559,Leisure,1970-01-01 13:19:00,33.0,2194.0,32.47
3350,3,6578.0,559,Housework,1970-01-01 13:19:00,8.92,619.0,9.16
3351,11,5073.0,559,Travel and Other,1970-01-01 13:19:00,6.88,538.0,7.96
3352,4,1972.0,559,Child Care,1970-01-01 13:19:00,2.67,144.0,2.13
3353,5,462.0,559,Adult Care,1970-01-01 13:19:00,0.63,128.0,1.89
3354,6,35351.0,560,Work and Education,1970-01-01 13:20:00,47.93,3137.0,46.42
3355,10,24344.0,560,Leisure,1970-01-01 13:20:00,33.01,2186.0,32.35
3356,3,6572.0,560,Housework,1970-01-01 13:20:00,8.91,621.0,9.19
3357,11,5062.0,560,Travel and Other,1970-01-01 13:20:00,6.86,543.0,8.03
3358,4,1967.0,560,Child Care,1970-01-01 13:20:00,2.67,143.0,2.12
3359,5,462.0,560,Adult Care,1970-01-01 13:20:00,0.63,128.0,1.89
3360,6,35700.0,561,Work and Education,1970-01-01 13:21:00,48.4,3177.0,47.01
3361,10,24135.0,561,Leisure,1970-01-01 13:21:00,32.72,2163.0,32.01
3362,3,6582.0,561,Housework,1970-01-01 13:21:00,8.92,617.0,9.13
3363,11,4884.0,561,Travel and Other,1970-01-01 13:21:00,6.62,524.0,7.75
3364,4,1995.0,561,Child Care,1970-01-01 13:21:00,2.7,148.0,2.19
3365,5,462.0,561,Adult Care,1970-01-01 13:21:00,0.63,129.0,1.91
3366,6,35718.0,562,Work and Education,1970-01-01 13:22:00,48.43,3175.0,46.98
3367,10,24125.0,562,Leisure,1970-01-01 13:22:00,32.71,2162.0,31.99
3368,3,6592.0,562,Housework,1970-01-01 13:22:00,8.94,621.0,9.19
3369,11,4858.0,562,Travel and Other,1970-01-01 13:22:00,6.59,528.0,7.81
3370,4,2006.0,562,Child Care,1970-01-01 13:22:00,2.72,147.0,2.18
3371,5,459.0,562,Adult Care,1970-01-01 13:22:00,0.62,125.0,1.85
3372,6,35738.0,563,Work and Education,1970-01-01 13:23:00,48.45,3174.0,46.97
3373,10,24116.0,563,Leisure,1970-01-01 13:23:00,32.7,2159.0,31.95
3374,3,6601.0,563,Housework,1970-01-01 13:23:00,8.95,622.0,9.2
3375,11,4834.0,563,Travel and Other,1970-01-01 13:23:00,6.55,529.0,7.83
3376,4,2013.0,563,Child Care,1970-01-01 13:23:00,2.73,147.0,2.18
3377,5,456.0,563,Adult Care,1970-01-01 13:23:00,0.62,127.0,1.88
3378,6,35746.0,564,Work and Education,1970-01-01 13:24:00,48.46,3174.0,46.97
3379,10,24118.0,564,Leisure,1970-01-01 13:24:00,32.7,2160.0,31.96
3380,3,6597.0,564,Housework,1970-01-01 13:24:00,8.94,625.0,9.25
3381,11,4820.0,564,Travel and Other,1970-01-01 13:24:00,6.53,527.0,7.8
3382,4,2020.0,564,Child Care,1970-01-01 13:24:00,2.74,147.0,2.18
3383,5,457.0,564,Adult Care,1970-01-01 13:24:00,0.62,125.0,1.85
3384,6,35755.0,565,Work and Education,1970-01-01 13:25:00,48.48,3176.0,47.0
3385,10,24125.0,565,Leisure,1970-01-01 13:25:00,32.71,2162.0,31.99
3386,3,6595.0,565,Housework,1970-01-01 13:25:00,8.94,623.0,9.22
3387,11,4808.0,565,Travel and Other,1970-01-01 13:25:00,6.52,526.0,7.78
3388,4,2020.0,565,Child Care,1970-01-01 13:25:00,2.74,147.0,2.18
3389,5,455.0,565,Adult Care,1970-01-01 13:25:00,0.62,124.0,1.83
3390,6,35924.0,566,Work and Education,1970-01-01 13:26:00,48.71,3190.0,47.2
3391,10,24033.0,566,Leisure,1970-01-01 13:26:00,32.58,2165.0,32.04
3392,3,6593.0,566,Housework,1970-01-01 13:26:00,8.94,619.0,9.16
3393,11,4719.0,566,Travel and Other,1970-01-01 13:26:00,6.4,513.0,7.59
3394,4,2018.0,566,Child Care,1970-01-01 13:26:00,2.74,142.0,2.1
3395,5,471.0,566,Adult Care,1970-01-01 13:26:00,0.64,129.0,1.91
3396,6,35942.0,567,Work and Education,1970-01-01 13:27:00,48.73,3191.0,47.22
3397,10,24053.0,567,Leisure,1970-01-01 13:27:00,32.61,2158.0,31.93
3398,3,6579.0,567,Housework,1970-01-01 13:27:00,8.92,621.0,9.19
3399,11,4694.0,567,Travel and Other,1970-01-01 13:27:00,6.36,515.0,7.62
3400,4,2019.0,567,Child Care,1970-01-01 13:27:00,2.74,143.0,2.12
3401,5,471.0,567,Adult Care,1970-01-01 13:27:00,0.64,130.0,1.92
3402,6,35960.0,568,Work and Education,1970-01-01 13:28:00,48.75,3195.0,47.28
3403,10,24067.0,568,Leisure,1970-01-01 13:28:00,32.63,2156.0,31.9
3404,3,6577.0,568,Housework,1970-01-01 13:28:00,8.92,618.0,9.14
3405,11,4672.0,568,Travel and Other,1970-01-01 13:28:00,6.33,517.0,7.65
3406,4,2017.0,568,Child Care,1970-01-01 13:28:00,2.73,144.0,2.13
3407,5,465.0,568,Adult Care,1970-01-01 13:28:00,0.63,128.0,1.89
3408,6,35974.0,569,Work and Education,1970-01-01 13:29:00,48.77,3195.0,47.28
3409,10,24068.0,569,Leisure,1970-01-01 13:29:00,32.63,2156.0,31.9
3410,3,6573.0,569,Housework,1970-01-01 13:29:00,8.91,620.0,9.17
3411,11,4662.0,569,Travel and Other,1970-01-01 13:29:00,6.32,516.0,7.64
3412,4,2018.0,569,Child Care,1970-01-01 13:29:00,2.74,144.0,2.13
3413,5,463.0,569,Adult Care,1970-01-01 13:29:00,0.63,127.0,1.88
3414,6,35989.0,570,Work and Education,1970-01-01 13:30:00,48.79,3198.0,47.32
3415,10,24058.0,570,Leisure,1970-01-01 13:30:00,32.62,2164.0,32.02
3416,3,6583.0,570,Housework,1970-01-01 13:30:00,8.93,613.0,9.07
3417,11,4648.0,570,Travel and Other,1970-01-01 13:30:00,6.3,512.0,7.58
3418,4,2018.0,570,Child Care,1970-01-01 13:30:00,2.74,143.0,2.12
3419,5,462.0,570,Adult Care,1970-01-01 13:30:00,0.63,128.0,1.89
3420,6,36282.0,571,Work and Education,1970-01-01 13:31:00,49.19,3242.0,47.97
3421,10,22804.0,571,Leisure,1970-01-01 13:31:00,30.92,2046.0,30.28
3422,3,6523.0,571,Housework,1970-01-01 13:31:00,8.84,606.0,8.97
3423,11,5636.0,571,Travel and Other,1970-01-01 13:31:00,7.64,599.0,8.86
3424,4,2042.0,571,Child Care,1970-01-01 13:31:00,2.77,143.0,2.12
3425,5,471.0,571,Adult Care,1970-01-01 13:31:00,0.64,122.0,1.81
3426,6,36302.0,572,Work and Education,1970-01-01 13:32:00,49.22,3243.0,47.99
3427,10,22808.0,572,Leisure,1970-01-01 13:32:00,30.92,2049.0,30.32
3428,3,6511.0,572,Housework,1970-01-01 13:32:00,8.83,609.0,9.01
3429,11,5640.0,572,Travel and Other,1970-01-01 13:32:00,7.65,593.0,8.77
3430,4,2033.0,572,Child Care,1970-01-01 13:32:00,2.76,142.0,2.1
3431,5,464.0,572,Adult Care,1970-01-01 13:32:00,0.63,122.0,1.81
3432,6,36331.0,573,Work and Education,1970-01-01 13:33:00,49.26,3250.0,48.09
3433,10,22838.0,573,Leisure,1970-01-01 13:33:00,30.96,2050.0,30.33
3434,3,6500.0,573,Housework,1970-01-01 13:33:00,8.81,610.0,9.03
3435,11,5585.0,573,Travel and Other,1970-01-01 13:33:00,7.57,584.0,8.64
3436,4,2039.0,573,Child Care,1970-01-01 13:33:00,2.76,139.0,2.06
3437,5,465.0,573,Adult Care,1970-01-01 13:33:00,0.63,125.0,1.85
3438,6,36346.0,574,Work and Education,1970-01-01 13:34:00,49.28,3250.0,48.09
3439,10,22868.0,574,Leisure,1970-01-01 13:34:00,31.0,2044.0,30.25
3440,3,6500.0,574,Housework,1970-01-01 13:34:00,8.81,611.0,9.04
3441,11,5540.0,574,Travel and Other,1970-01-01 13:34:00,7.51,588.0,8.7
3442,4,2035.0,574,Child Care,1970-01-01 13:34:00,2.76,140.0,2.07
3443,5,469.0,574,Adult Care,1970-01-01 13:34:00,0.64,125.0,1.85
3444,6,36368.0,575,Work and Education,1970-01-01 13:35:00,49.31,3252.0,48.12
3445,10,22874.0,575,Leisure,1970-01-01 13:35:00,31.01,2045.0,30.26
3446,3,6506.0,575,Housework,1970-01-01 13:35:00,8.82,609.0,9.01
3447,11,5508.0,575,Travel and Other,1970-01-01 13:35:00,7.47,588.0,8.7
3448,4,2034.0,575,Child Care,1970-01-01 13:35:00,2.76,141.0,2.09
3449,5,468.0,575,Adult Care,1970-01-01 13:35:00,0.63,123.0,1.82
3450,6,36576.0,576,Work and Education,1970-01-01 13:36:00,49.59,3272.0,48.42
3451,10,22894.0,576,Leisure,1970-01-01 13:36:00,31.04,2051.0,30.35
3452,3,6498.0,576,Housework,1970-01-01 13:36:00,8.81,604.0,8.94
3453,11,5274.0,576,Travel and Other,1970-01-01 13:36:00,7.15,563.0,8.33
3454,4,2039.0,576,Child Care,1970-01-01 13:36:00,2.76,142.0,2.1
3455,5,477.0,576,Adult Care,1970-01-01 13:36:00,0.65,126.0,1.86
3456,6,36608.0,577,Work and Education,1970-01-01 13:37:00,49.63,3275.0,48.46
3457,10,22901.0,577,Leisure,1970-01-01 13:37:00,31.05,2060.0,30.48
3458,3,6500.0,577,Housework,1970-01-01 13:37:00,8.81,600.0,8.88
3459,11,5235.0,577,Travel and Other,1970-01-01 13:37:00,7.1,560.0,8.29
3460,4,2037.0,577,Child Care,1970-01-01 13:37:00,2.76,141.0,2.09
3461,5,477.0,577,Adult Care,1970-01-01 13:37:00,0.65,122.0,1.81
3462,6,36634.0,578,Work and Education,1970-01-01 13:38:00,49.67,3276.0,48.48
3463,10,22919.0,578,Leisure,1970-01-01 13:38:00,31.07,2058.0,30.45
3464,3,6504.0,578,Housework,1970-01-01 13:38:00,8.82,605.0,8.95
3465,11,5184.0,578,Travel and Other,1970-01-01 13:38:00,7.03,556.0,8.23
3466,4,2039.0,578,Child Care,1970-01-01 13:38:00,2.76,140.0,2.07
3467,5,478.0,578,Adult Care,1970-01-01 13:38:00,0.65,123.0,1.82
3468,6,36648.0,579,Work and Education,1970-01-01 13:39:00,49.69,3280.0,48.54
3469,10,22928.0,579,Leisure,1970-01-01 13:39:00,31.09,2060.0,30.48
3470,3,6507.0,579,Housework,1970-01-01 13:39:00,8.82,599.0,8.86
3471,11,5160.0,579,Travel and Other,1970-01-01 13:39:00,7.0,554.0,8.2
3472,4,2038.0,579,Child Care,1970-01-01 13:39:00,2.76,142.0,2.1
3473,5,477.0,579,Adult Care,1970-01-01 13:39:00,0.65,123.0,1.82
3474,6,36656.0,580,Work and Education,1970-01-01 13:40:00,49.7,3281.0,48.55
3475,10,22937.0,580,Leisure,1970-01-01 13:40:00,31.1,2065.0,30.56
3476,3,6503.0,580,Housework,1970-01-01 13:40:00,8.82,602.0,8.91
3477,11,5148.0,580,Travel and Other,1970-01-01 13:40:00,6.98,546.0,8.08
3478,4,2040.0,580,Child Care,1970-01-01 13:40:00,2.77,142.0,2.1
3479,5,474.0,580,Adult Care,1970-01-01 13:40:00,0.64,122.0,1.81
3480,6,36907.0,581,Work and Education,1970-01-01 13:41:00,50.04,3296.0,48.77
3481,10,22878.0,581,Leisure,1970-01-01 13:41:00,31.02,2091.0,30.94
3482,3,6538.0,581,Housework,1970-01-01 13:41:00,8.86,593.0,8.77
3483,11,4891.0,581,Travel and Other,1970-01-01 13:41:00,6.63,516.0,7.64
3484,4,2060.0,581,Child Care,1970-01-01 13:41:00,2.79,143.0,2.12
3485,5,484.0,581,Adult Care,1970-01-01 13:41:00,0.66,119.0,1.76
3486,6,36918.0,582,Work and Education,1970-01-01 13:42:00,50.05,3296.0,48.77
3487,10,22903.0,582,Leisure,1970-01-01 13:42:00,31.05,2095.0,31.0
3488,3,6527.0,582,Housework,1970-01-01 13:42:00,8.85,596.0,8.82
3489,11,4861.0,582,Travel and Other,1970-01-01 13:42:00,6.59,507.0,7.5
3490,4,2068.0,582,Child Care,1970-01-01 13:42:00,2.8,144.0,2.13
3491,5,481.0,582,Adult Care,1970-01-01 13:42:00,0.65,120.0,1.78
3492,6,36929.0,583,Work and Education,1970-01-01 13:43:00,50.07,3299.0,48.82
3493,10,22912.0,583,Leisure,1970-01-01 13:43:00,31.06,2097.0,31.03
3494,3,6529.0,583,Housework,1970-01-01 13:43:00,8.85,593.0,8.77
3495,11,4842.0,583,Travel and Other,1970-01-01 13:43:00,6.56,504.0,7.46
3496,4,2065.0,583,Child Care,1970-01-01 13:43:00,2.8,144.0,2.13
3497,5,481.0,583,Adult Care,1970-01-01 13:43:00,0.65,121.0,1.79
3498,6,36930.0,584,Work and Education,1970-01-01 13:44:00,50.07,3300.0,48.83
3499,10,22911.0,584,Leisure,1970-01-01 13:44:00,31.06,2088.0,30.9
3500,3,6533.0,584,Housework,1970-01-01 13:44:00,8.86,596.0,8.82
3501,11,4839.0,584,Travel and Other,1970-01-01 13:44:00,6.56,508.0,7.52
3502,4,2066.0,584,Child Care,1970-01-01 13:44:00,2.8,143.0,2.12
3503,5,479.0,584,Adult Care,1970-01-01 13:44:00,0.65,123.0,1.82
3504,6,36945.0,585,Work and Education,1970-01-01 13:45:00,50.09,3302.0,48.86
3505,10,22934.0,585,Leisure,1970-01-01 13:45:00,31.09,2092.0,30.96
3506,3,6532.0,585,Housework,1970-01-01 13:45:00,8.86,596.0,8.82
3507,11,4802.0,585,Travel and Other,1970-01-01 13:45:00,6.51,501.0,7.41
3508,4,2063.0,585,Child Care,1970-01-01 13:45:00,2.8,144.0,2.13
3509,5,482.0,585,Adult Care,1970-01-01 13:45:00,0.65,123.0,1.82
3510,6,37310.0,586,Work and Education,1970-01-01 13:46:00,50.58,3347.0,49.53
3511,10,22475.0,586,Leisure,1970-01-01 13:46:00,30.47,2035.0,30.11
3512,3,6498.0,586,Housework,1970-01-01 13:46:00,8.81,607.0,8.98
3513,11,4890.0,586,Travel and Other,1970-01-01 13:46:00,6.63,510.0,7.55
3514,4,2088.0,586,Child Care,1970-01-01 13:46:00,2.83,136.0,2.01
3515,5,497.0,586,Adult Care,1970-01-01 13:46:00,0.67,123.0,1.82
3516,6,37330.0,587,Work and Education,1970-01-01 13:47:00,50.61,3348.0,49.54
3517,10,22484.0,587,Leisure,1970-01-01 13:47:00,30.48,2038.0,30.16
3518,3,6493.0,587,Housework,1970-01-01 13:47:00,8.8,607.0,8.98
3519,11,4874.0,587,Travel and Other,1970-01-01 13:47:00,6.61,506.0,7.49
3520,4,2089.0,587,Child Care,1970-01-01 13:47:00,2.83,138.0,2.04
3521,5,488.0,587,Adult Care,1970-01-01 13:47:00,0.66,121.0,1.79
3522,6,37357.0,588,Work and Education,1970-01-01 13:48:00,50.65,3350.0,49.57
3523,10,22452.0,588,Leisure,1970-01-01 13:48:00,30.44,2035.0,30.11
3524,3,6479.0,588,Housework,1970-01-01 13:48:00,8.78,612.0,9.06
3525,11,4891.0,588,Travel and Other,1970-01-01 13:48:00,6.63,497.0,7.35
3526,4,2096.0,588,Child Care,1970-01-01 13:48:00,2.84,140.0,2.07
3527,5,483.0,588,Adult Care,1970-01-01 13:48:00,0.65,124.0,1.83
3528,6,37369.0,589,Work and Education,1970-01-01 13:49:00,50.66,3350.0,49.57
3529,10,22452.0,589,Leisure,1970-01-01 13:49:00,30.44,2039.0,30.17
3530,3,6477.0,589,Housework,1970-01-01 13:49:00,8.78,611.0,9.04
3531,11,4886.0,589,Travel and Other,1970-01-01 13:49:00,6.62,496.0,7.34
3532,4,2092.0,589,Child Care,1970-01-01 13:49:00,2.84,140.0,2.07
3533,5,482.0,589,Adult Care,1970-01-01 13:49:00,0.65,122.0,1.81
3534,6,37383.0,590,Work and Education,1970-01-01 13:50:00,50.68,3352.0,49.6
3535,10,22454.0,590,Leisure,1970-01-01 13:50:00,30.44,2036.0,30.13
3536,3,6481.0,590,Housework,1970-01-01 13:50:00,8.79,610.0,9.03
3537,11,4865.0,590,Travel and Other,1970-01-01 13:50:00,6.6,495.0,7.32
3538,4,2095.0,590,Child Care,1970-01-01 13:50:00,2.84,143.0,2.12
3539,5,480.0,590,Adult Care,1970-01-01 13:50:00,0.65,122.0,1.81
3540,6,37569.0,591,Work and Education,1970-01-01 13:51:00,50.94,3370.0,49.87
3541,10,22291.0,591,Leisure,1970-01-01 13:51:00,30.22,2035.0,30.11
3542,3,6468.0,591,Housework,1970-01-01 13:51:00,8.77,607.0,8.98
3543,11,4828.0,591,Travel and Other,1970-01-01 13:51:00,6.55,479.0,7.09
3544,4,2105.0,591,Child Care,1970-01-01 13:51:00,2.85,144.0,2.13
3545,5,497.0,591,Adult Care,1970-01-01 13:51:00,0.67,123.0,1.82
3546,6,37584.0,592,Work and Education,1970-01-01 13:52:00,50.96,3371.0,49.88
3547,10,22297.0,592,Leisure,1970-01-01 13:52:00,30.23,2032.0,30.07
3548,3,6466.0,592,Housework,1970-01-01 13:52:00,8.77,606.0,8.97
3549,11,4809.0,592,Travel and Other,1970-01-01 13:52:00,6.52,483.0,7.15
3550,4,2102.0,592,Child Care,1970-01-01 13:52:00,2.85,144.0,2.13
3551,5,500.0,592,Adult Care,1970-01-01 13:52:00,0.68,122.0,1.81
3552,6,37601.0,593,Work and Education,1970-01-01 13:53:00,50.98,3372.0,49.9
3553,10,22301.0,593,Leisure,1970-01-01 13:53:00,30.24,2024.0,29.95
3554,3,6451.0,593,Housework,1970-01-01 13:53:00,8.75,614.0,9.09
3555,11,4801.0,593,Travel and Other,1970-01-01 13:53:00,6.51,479.0,7.09
3556,4,2104.0,593,Child Care,1970-01-01 13:53:00,2.85,146.0,2.16
3557,5,500.0,593,Adult Care,1970-01-01 13:53:00,0.68,123.0,1.82
3558,6,37610.0,594,Work and Education,1970-01-01 13:54:00,50.99,3374.0,49.93
3559,10,22308.0,594,Leisure,1970-01-01 13:54:00,30.24,2022.0,29.92
3560,3,6448.0,594,Housework,1970-01-01 13:54:00,8.74,616.0,9.12
3561,11,4792.0,594,Travel and Other,1970-01-01 13:54:00,6.5,476.0,7.04
3562,4,2102.0,594,Child Care,1970-01-01 13:54:00,2.85,147.0,2.18
3563,5,498.0,594,Adult Care,1970-01-01 13:54:00,0.68,123.0,1.82
3564,6,37623.0,595,Work and Education,1970-01-01 13:55:00,51.01,3376.0,49.96
3565,10,22303.0,595,Leisure,1970-01-01 13:55:00,30.24,2023.0,29.93
3566,3,6440.0,595,Housework,1970-01-01 13:55:00,8.73,615.0,9.1
3567,11,4796.0,595,Travel and Other,1970-01-01 13:55:00,6.5,476.0,7.04
3568,4,2102.0,595,Child Care,1970-01-01 13:55:00,2.85,146.0,2.16
3569,5,494.0,595,Adult Care,1970-01-01 13:55:00,0.67,122.0,1.81
3570,6,37806.0,596,Work and Education,1970-01-01 13:56:00,51.26,3387.0,50.12
3571,10,22172.0,596,Leisure,1970-01-01 13:56:00,30.06,2028.0,30.01
3572,3,6458.0,596,Housework,1970-01-01 13:56:00,8.76,608.0,9.0
3573,11,4701.0,596,Travel and Other,1970-01-01 13:56:00,6.37,459.0,6.79
3574,4,2137.0,596,Child Care,1970-01-01 13:56:00,2.9,154.0,2.28
3575,5,484.0,596,Adult Care,1970-01-01 13:56:00,0.66,122.0,1.81
3576,6,37829.0,597,Work and Education,1970-01-01 13:57:00,51.29,3390.0,50.16
3577,10,22173.0,597,Leisure,1970-01-01 13:57:00,30.06,2033.0,30.08
3578,3,6468.0,597,Housework,1970-01-01 13:57:00,8.77,605.0,8.95
3579,11,4668.0,597,Travel and Other,1970-01-01 13:57:00,6.33,451.0,6.67
3580,4,2130.0,597,Child Care,1970-01-01 13:57:00,2.89,156.0,2.31
3581,5,490.0,597,Adult Care,1970-01-01 13:57:00,0.66,123.0,1.82
3582,6,37846.0,598,Work and Education,1970-01-01 13:58:00,51.31,3391.0,50.18
3583,10,22182.0,598,Leisure,1970-01-01 13:58:00,30.07,2035.0,30.11
3584,3,6468.0,598,Housework,1970-01-01 13:58:00,8.77,601.0,8.89
3585,11,4642.0,598,Travel and Other,1970-01-01 13:58:00,6.29,454.0,6.72
3586,4,2137.0,598,Child Care,1970-01-01 13:58:00,2.9,155.0,2.29
3587,5,483.0,598,Adult Care,1970-01-01 13:58:00,0.65,122.0,1.81
3588,6,37867.0,599,Work and Education,1970-01-01 13:59:00,51.34,3393.0,50.21
3589,10,22180.0,599,Leisure,1970-01-01 13:59:00,30.07,2038.0,30.16
3590,3,6466.0,599,Housework,1970-01-01 13:59:00,8.77,603.0,8.92
3591,11,4615.0,599,Travel and Other,1970-01-01 13:59:00,6.26,451.0,6.67
3592,4,2144.0,599,Child Care,1970-01-01 13:59:00,2.91,154.0,2.28
3593,5,486.0,599,Adult Care,1970-01-01 13:59:00,0.66,119.0,1.76
3594,6,37878.0,600,Work and Education,1970-01-01 14:00:00,51.35,3393.0,50.21
3595,10,22171.0,600,Leisure,1970-01-01 14:00:00,30.06,2039.0,30.17
3596,3,6468.0,600,Housework,1970-01-01 14:00:00,8.77,603.0,8.92
3597,11,4617.0,600,Travel and Other,1970-01-01 14:00:00,6.26,453.0,6.7
3598,4,2140.0,600,Child Care,1970-01-01 14:00:00,2.9,154.0,2.28
3599,5,484.0,600,Adult Care,1970-01-01 14:00:00,0.66,116.0,1.72
3600,6,36220.0,601,Work and Education,1970-01-01 14:01:00,49.11,3245.0,48.02
3601,10,21829.0,601,Leisure,1970-01-01 14:01:00,29.6,2021.0,29.91
3602,11,6550.0,601,Travel and Other,1970-01-01 14:01:00,8.88,614.0,9.09
3603,3,6543.0,601,Housework,1970-01-01 14:01:00,8.87,616.0,9.12
3604,4,2143.0,601,Child Care,1970-01-01 14:01:00,2.91,154.0,2.28
3605,5,473.0,601,Adult Care,1970-01-01 14:01:00,0.64,108.0,1.6
3606,6,36240.0,602,Work and Education,1970-01-01 14:02:00,49.13,3244.0,48.0
3607,10,21850.0,602,Leisure,1970-01-01 14:02:00,29.62,2027.0,29.99
3608,3,6541.0,602,Housework,1970-01-01 14:02:00,8.87,615.0,9.1
3609,11,6513.0,602,Travel and Other,1970-01-01 14:02:00,8.83,615.0,9.1
3610,4,2145.0,602,Child Care,1970-01-01 14:02:00,2.91,150.0,2.22
3611,5,469.0,602,Adult Care,1970-01-01 14:02:00,0.64,107.0,1.58
3612,6,36268.0,603,Work and Education,1970-01-01 14:03:00,49.17,3246.0,48.03
3613,10,21885.0,603,Leisure,1970-01-01 14:03:00,29.67,2034.0,30.1
3614,3,6555.0,603,Housework,1970-01-01 14:03:00,8.89,607.0,8.98
3615,11,6429.0,603,Travel | |
# Copyright 2019 Graphcore Ltd.
import os
import sys
import json
import argparse
import math
from typing import List, Any, Optional
import numpy as np
import onnx
from logging import getLogger
from onnx import TensorProto, numpy_helper
from bert_model import BertConfig
logger = getLogger(__name__)
def load_initializers_from_onnx(model_path):
initializers = {}
model = onnx.load(model_path)
for weight in model.graph.initializer:
if weight.data_type == TensorProto.FLOAT16:
int_data = np.asarray(weight.int32_data, np.int32)
np_weight = int_data.view(dtype=np.float16).reshape(weight.dims)
else:
np_weight = numpy_helper.to_array(weight)
initializers[weight.name] = np_weight
return initializers
def save_model_statistics(model_path, writer, i=0):
initializers = load_initializers_from_onnx(model_path)
for name, np_weight in initializers.items():
name = name.replace(":", "_")
writer.add_histogram(name, np_weight, i)
writer.add_scalar(f"L2/{name}", np.linalg.norm(np_weight), i)
def parser_from_NamedTuple(parser, ntuple, args={}):
for key in ntuple._fields:
string = "--" + key.replace("_", "-")
t = ntuple._field_types[key]
default = ntuple._field_defaults.get(key, None)
kwargs = dict(
string=string,
type=t,
default=default,
dest=key
)
if t is bool:
# Make bool a flag
del kwargs["type"]
del kwargs["default"]
kwargs["action"] = "store_false" if ntuple._field_defaults[key] else "store_true"
else:
for _t in (str, int):
if t == List[_t]:
kwargs["type"] = _t
kwargs["nargs"] = '*'
break
if t == Optional[_t]:
kwargs["type"] = _t
break
args_or_help = args.get(key, None)
if isinstance(args_or_help, dict):
kwargs.update(**args_or_help)
else:
kwargs["help"] = args_or_help
string = kwargs["string"]
del kwargs["string"]
parser.add_argument(string, **kwargs)
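# Illustrative sketch, not part of the original file: how parser_from_NamedTuple
# turns NamedTuple fields into argparse flags. ExampleConfig is a hypothetical
# stand-in for BertConfig; note the helper relies on NamedTuple._field_types,
# which only exists on Python <= 3.8.
def _example_namedtuple_parser():  # pragma: no cover - illustrative only
    from typing import NamedTuple

    class ExampleConfig(NamedTuple):
        batch_size: int = 1
        no_dropout: bool = False

    example_parser = argparse.ArgumentParser("Example")
    parser_from_NamedTuple(example_parser, ExampleConfig, args={"batch_size": "Micro batch size"})
    # "--batch-size 4 --no-dropout" -> Namespace(batch_size=4, no_dropout=True)
    return example_parser.parse_args(["--batch-size", "4", "--no-dropout"])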
class ScheduleArgumentParser(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
self._nargs = nargs
super(ScheduleArgumentParser, self).__init__(
option_strings, dest, nargs=nargs, **kwargs)
self.default_input = kwargs['default']
def __call__(self, parser, namespace, values, option_string=None):
schedule = {}
if len(values) == 0:
schedule = self.default_input
for kv in values:
training_proportion, lr = kv.split(":")
try:
schedule[int(training_proportion)] = float(lr)
except ValueError as ex:
logger.warn("Invalid Learning Rate Schedule provided. "
"It should be a set of int:float pairs.")
raise ex
setattr(namespace, self.dest, schedule)
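# Illustrative sketch, not part of the original file: the action above turns
# space-separated "<int>:<float>" pairs into a {step_or_epoch: value} dict.
def _example_schedule_parsing():  # pragma: no cover - illustrative only
    example_parser = argparse.ArgumentParser("Example")
    example_parser.add_argument("--lr-schedule-by-epoch", action=ScheduleArgumentParser,
                                nargs="*", default=None)
    args = example_parser.parse_args(["--lr-schedule-by-epoch", "0:0.00001", "1:0.0001"])
    # args.lr_schedule_by_epoch == {0: 1e-05, 1: 0.0001}
    return args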
class ValidationConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = None
try:
value = json.loads(values)
except json.decoder.JSONDecodeError as e:
pass
if os.path.isfile(values):
with open(values, 'r') as f:
value = json.load(f)
if value is not None:
setattr(namespace, self.dest, value)
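# Illustrative sketch, not part of the original file: the action above accepts
# either an inline JSON string or a path to a JSON file. The flag name and the
# JSON payload here are hypothetical examples.
def _example_validation_config():  # pragma: no cover - illustrative only
    example_parser = argparse.ArgumentParser("Example")
    example_parser.add_argument("--validation-config", action=ValidationConfig, type=str)
    args = example_parser.parse_args(["--validation-config", '{"batch_size": 4}'])
    # args.validation_config == {"batch_size": 4}
    return args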
def parse_bert_args(args_string=None):
pparser = argparse.ArgumentParser("Config Parser", add_help=False)
pparser.add_argument("--config", type=str)
pargs, remaining_argv = pparser.parse_known_args(args_string)
parser = argparse.ArgumentParser(
"PopART BERT", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# TODO: Organise Argument Groups.
group = parser.add_argument_group("Model Config")
parser_from_NamedTuple(group, BertConfig, args={
"batch_size": "Set the micro batch-size",
"sequence_length": "Set the max sequence length",
"mask_tokens": "Set the max number of masked tokens in a sequence (PRETRAINING only)",
"vocab_length": "Set the size of the vocabulary",
"hidden_size": "Set the size of the hidden state of the transformer layers",
"ff_size__": dict(
string="--ff-size",
help="Set the size of the intermediate state of the feed forward layers, by default 4x the hidden_size"
),
"attention_heads": "Set the number of heads in self attention",
"inference": "Create a model for inference. Otherwise a trainable model is created and trained.",
"num_layers": "Set the number of transformer layers",
"layers_per_ipu": "Set the number of layers on each IPU",
"no_dropout": "Don't use dropout",
"dropout_prob": "Set the dropout probability",
"layer_norm_eps": "Set the layer norm epsilon value",
"popart_dtype": dict(
string="--dtype",
choices=["FLOAT16", "FLOAT"],
help="Set the data type used"
),
"task": dict(
choices=["PRETRAINING", "SQUAD", "MRPC"],
help="Set the task. Pretraining (Masked-LM & Next Sentence Prediction), SQuAD, MRPC"
),
"positional_embedding_init_fn": dict(
choices=["DEFAULT", "TRANSFORMER", "SIMPLIFIED"],
help="Set the function used to initialise the positional embeddings"
),
"custom_ops": dict(
choices=["gather", "attention", "feed_forward"],
help="Use Custom Operators"
),
"split_linear_layers": "Memory Optimisation to serialise MatMul Operations. Required for large sequence_length",
"squeeze_model": "Try to use fewer IPUs by placing the input embedding and loss onto the \
same IPUs as the first and last transformer layers respectively",
"no_mask": "Don't apply padding masks to the attention scores",
"projection_serialization_steps": "Split the final MLM projection into this many steps"
})
group.add_argument("--use-ipu-model", action="store_true",
help="Target the IpuModel (acquires a real IPU device by default). \
WARNING: The custom ops do not have validated cycle estimates \
so do not rely on the model's cycle report.")
group = parser.add_argument_group("SQuAD Config")
group.add_argument("--vocab-file", type=str,
help="Path to the vocab file")
group.add_argument("--do-lower-case", action="store_true",
help="Use this when using a uncased vocab")
group.add_argument("--squad-results-dir", type=str, default="squad_results",
help="Path to directory to write results (Note: will be created if path does not exist)")
group.add_argument("--squad-evaluate-script", type=str,
help="Path to SQuAD evaulate-v1.1.py script")
group = parser.add_argument_group("Training Config")
group.add_argument("--gradient-accumulation-factor", type=int, default=1,
help="Set how many gradients to accumulate before updating the weights. (Note: This changes the effective batch size)")
group.add_argument("--replication-factor", type=int, default=1,
help="Replicates the graph by this factor across IPUs to achieve data parallel execution. (Note: This changes the effective batch size)")
group.add_argument("--learning-rate", type=float, default=0.0008,
help="Set the learning rate")
group.add_argument("--momentum", type=float, default=0.984375,
help="Set the optimizer momentum value")
group.add_argument("--dampening", type=float,
help="Set the optimizer dampening value. (Note: this will be set to momentum value by default)")
group.add_argument("--velocity-scaling", type=float, default=1.0,
help="Set the velocity scaling. This helps prevent overflow when accumulating gradients.")
group.add_argument("--loss-scaling", type=float, default=4.0,
help="Set the loss scaling. This helps prevent underflow during backpropagation.")
group.add_argument("--epochs", type=int, default=35,
help="Number of epochs to train for")
group.add_argument("--stochastic-rounding", action="store_true",
help="Turn on Stochastic Rounding")
group = parser.add_argument_group("Continuous Pipelining Config")
group.add_argument("--pipeline-lr-scaling", action="store_true",
help="Enable learning rate scaling per pipeline stage")
group.add_argument("--pipeline-lr-scaling-offset", type=float, default=0.25,
help="Set the value for learning rate scaling on the first pipeline stage. Learning rates will be scaled "
"linearly from this offset (default: 0.25) to 1 as pipeline stage increases to account for increased errors "
"at lower-level stages when pipelining. (Note: for pipelines with few stages, this should be increased)")
group.add_argument("--pipeline-momentum-scaling", action="store_true",
help="Enable momentum and dampening scaling per pipeline stage")
group.add_argument("--pipeline-momentum-scaling-offset", type=float, default=0.1,
help="Set the value momentum scaling on the last pipeline stage. Momentums will be scaled "
"linearly from this offset (default: 0.1) to 1 as pipeline stage decrease to account for increased errors "
"at lower-level stages when pipelining. (Note: for pipelines with few stages, this should be increased)")
group.add_argument("--pipeline-dampening-scaling-offset", type=float,
help="Set the value for dampening scaling on the last pipeline stage. Dampenings will be scaled "
"linearly from this offset (default: same as momentum) to 1 as pipeline stage decrease to account for increased errors "
"at lower-level stages when pipelining. (Note: this will be set to the momentum offset by default)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--lr-schedule-by-epoch", action=ScheduleArgumentParser, nargs="*", default=None,
help="A schedule for learning rate warmup and decay, provided as space-separated "
"<int>:<float> pairs. The first item is the epoch at which to update and the second is "
"the learning rate at that epoch. \n"
"E.g.: --lr-schedule-by-epoch 0:0.00001 1:0.0001 3:0.0008 5:0.00004 10:0.00002")
group.add_argument("--lr-schedule-by-step", action=ScheduleArgumentParser, nargs="*", default=None,
help="A schedule for learning rate warmup and decay, provided as space-separated "
"<int>:<float> pairs. The first item is the step at which to update and the second is "
"the learning rate at that step. \n"
"E.g.: --lr-schedule-by-step 0:0.00001 2500:0.0001 10000:0.0008 50000:0.00004 100000:0.00002")
group = parser.add_mutually_exclusive_group()
group.add_argument("--ls-schedule-by-epoch", action=ScheduleArgumentParser, nargs="*", default=None,
help="A schedule for loss scaling, provided as space-separated <int>:<float> pairs. "
"The first item is the spoch at which to update and the second is "
"the loss scaling at that epoch. \n"
"E.g.: --ls-schedule-by-step 0:0.00001 2500:0.0001 10000:0.0008 50000:0.00004 100000:0.00002")
group.add_argument("--ls-schedule-by-step", action=ScheduleArgumentParser, nargs="*", default=None,
help="A schedule for loss scaling, provided as space-separated <int>:<float> pairs. "
"The first item is the step at which to update and the second is "
"the loss scaling at that step. \n"
"E.g.: --ls-schedule-by-step 0:0.00001 2500:0.0001 10000:0.0008 50000:0.00004 100000:0.00002")
group = parser.add_argument_group("Initialisation Config", "Flags for initialising the weights")
group.add_argument("--tf-checkpoint", type=str,
help="Path to Tensorflow Checkpoint to initialise the model.")
group.add_argument("--onnx-checkpoint", type=str,
help="Path to .onnx file created by this application to initialise the model.")
group = parser.add_argument_group("Data Config")
group.add_argument("--input-files", type=str, nargs="*",
help="Files to load data from. "
"For Pretraining: Binary files created by bert_data/create_pretraining_data.py. "
"For SQuAD: Path to train-v1.1.json")
group.add_argument("--shuffle", action="store_true",
help="Shuffle Dataset")
group.add_argument("--overwrite-cache", action="store_true",
help="Regenerates the SQuAD dataset instead of loading the cache if available")
group.add_argument("--no-drop-remainder", action="store_true",
help="Adjust the batches_per_step to perfectly divide the dataset so no data is missed. Only available for SQuAD.")
group.add_argument("--synthetic-data", action="store_true",
help="Generate a synthetic dataset. Creates enough data for one step per epoch. "
"Increase --epochs for multiple perfomance measurements.")
group.add_argument("--duplication-factor", type=int, default=1,
help="Set the number of times the dataset has been duplicated. This reduces the samples per epoch to"
" (# of samples in input-files)/duplication-factor")
group.add_argument("--epochs-to-cache", type=int, default=0,
help="Number of epochs of data to load into memory during PRETRAINING. Default is to load input files as needed.")
group = parser.add_argument_group("Execution Config")
group.add_argument("--batches-per-step", type=int, | |
<reponame>AstraZeneca/magnus-extensions
import logging
import json
from string import Template as str_template
import datetime
from collections import OrderedDict
from magnus.datastore import BaseRunLogStore, RunLog, StepLog, BranchLog
from magnus import defaults
from magnus import exceptions
logger = logging.getLogger(defaults.NAME)
logger.info('Loading DB datastore extension')
try:
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import declarative_base
from sqlalchemy import Column, Text, DateTime, Sequence, Integer
Base = declarative_base()
class DBLog(Base):
"""
Base table for storing run logs in database.
In this model, we fragment the run log into logical units that are concurrent safe.
"""
__tablename__ = 'db_log'
pk = Column(Integer, Sequence('id_seq'), primary_key=True)
run_id = Column(Text)
attribute_key = Column(Text) # run_log, step_internal_name, parameter_key etc
attribute_type = Column(Text) # RunLog, Step, Branch, Parameter
attribute_value = Column(Text) # The JSON string
created_at = Column(DateTime, default=datetime.datetime.utcnow)
def __repr__(self):
return f'DBLog for {self.run_id} and {self.attribute_key}'
except ImportError as _e:
logger.exception('Unable to import SQLalchemy, is it installed?')
msg = (
"SQLAlchemy is required for this extension. Please install it"
)
raise Exception(msg) from _e
def create_tables(connection_string):
"""
from magnus.datastore_extensions import db
connection_string = 'postgresql://localhost/'
db.create_tables(connection_string)
"""
engine = sqlalchemy.create_engine(connection_string)
Base.metadata.create_all(engine)
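# Illustrative sketch, not part of the original extension: creating the table
# once against a local SQLite file before pointing the run log store at it.
# The connection string below is a placeholder.
def _example_create_tables():  # pragma: no cover - illustrative only
    create_tables('sqlite:///./magnus_run_log.db')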
class DBStore(BaseRunLogStore):
"""
Using SQL alchemy to interface for a database as a Run Log store.
All concurrently accessible attributes are stored in independent rows
The expected columns:
run_id : str,
attribute_key: str, # step name, branch name, parameter name etc
attribute_type: str, # step, branch, parameter, run_log
attribute_value: json, # string representation of JSON
created_at: UTC time now, #Needed for ordering the Run log
Example config:
run_log:
type: db
config:
connection_string: The connection string to use in SQLAlchemy. Secret placeholders are fine.
"""
service_name = 'db'
def __init__(self, config):
super().__init__(config)
if 'connection_string' not in self.config:
raise Exception('DB run log stores require a connection_string in their config')
self.engine = None # Follows a singleton pattern
self.session = None
# Attribute Types
self.RUNLOG_ATTRIBUTE_TYPE = 'RunLog' # pylint: disable=c0103
self.PARAMETER_ATTRIBUTE_TYPE = 'Parameter' # pylint: disable=c0103
self.STEPLOG_ATTRIBUTE_TYPE = 'StepLog' # pylint: disable=c0103
self.BRANCHLOG_ATTRIBUTE_TYPE = 'BranchLog' # pylint: disable=c0103
@property
def connection_string(self) -> str:
"""
Returns the connection string as provided by the config
Raises:
Exception: If connection_string is not provided in the config
Returns:
str: The connection string specified in the config
"""
return self.config['connection_string']
def _make_db_engine(self):
"""
Creates a DB engine and session object once per execution. Singleton pattern
"""
if not self.engine:
from magnus.pipeline import global_executor # pylint: disable=C0415
secrets = global_executor.secrets_handler.get() # Returns all secrets as dictionary
connection_string = str_template(self.connection_string).safe_substitute(**secrets)
self.engine = sqlalchemy.create_engine(connection_string, pool_pre_ping=True)
self.session = sessionmaker(bind=self.engine)
def write_to_db(self, run_id: str, attribute_key: str, attribute_type: str, attribute_value: str):
"""
Write a part of the Run log to the database against a specified run_id
Args:
run_id (str): The run id to update the run log
attribute_key (str): run_log for RunLog, the step log internal name for steps, parameter key for parameter
attribute_type (str): One of RunLog, Parameter, StepLog, BranchLog
attribute_value (str): The value to put in the database
"""
self._make_db_engine()
with self.session() as session:
record = DBLog(run_id=run_id, attribute_key=attribute_key,
attribute_type=attribute_type, attribute_value=attribute_value)
session.add(record)
session.commit()
def update_db(self, run_id: str, attribute_type: str, attribute_key: str, attribute_value: str, upsert=True):
"""
Update the Database with a part of run log.
If upsert is true, we create the object instead of updating.
Args:
run_id (str): The run id to update the run log
attribute_key (str): run_log for RunLog, the step log internal name for steps, parameter key for parameter
attribute_type (str): One of RunLog, Parameter, StepLog, BranchLog
attribute_value (str): The value to put in the database
upsert (bool, optional): Create if not present. Defaults to True.
"""
self._make_db_engine()
with self.session() as session:
records = session.query(DBLog).filter(DBLog.run_id == run_id). \
filter(DBLog.attribute_type == attribute_type).\
filter(DBLog.attribute_key == attribute_key).all()
for record in records:
record.attribute_value = attribute_value
if upsert and len(records) == 0:
# We insert otherwise
record = DBLog(run_id=run_id, attribute_key=attribute_key,
attribute_type=attribute_type, attribute_value=attribute_value)
session.add(record)
session.commit()
def get_from_db(self, run_id: str, attribute_type: str, attribute_key=None):
"""
Gets an attribute from the db.
The attribute could be a RunLog, StepLog, BranchLog or Parameter
Args:
run_id (str): The Run Id to retrieve the attribute
attribute_type (str): The attribute to retrieve from the DB for the run_id
attribute_key (str, optional): Conditional filtering of attribute. Defaults to None.
Returns:
object: All the records from the DB which match the run_id and attribute
"""
self._make_db_engine()
with self.session() as session:
query = session.query(DBLog).order_by(DBLog.created_at).filter(DBLog.run_id == run_id). \
filter(DBLog.attribute_type == attribute_type)
if attribute_key:
query = query.filter(DBLog.attribute_key == attribute_key)
records = query.all()
return records
def _get_parent_branch(self, name: str) -> str: # pylint: disable=R0201
"""
Returns the name of the parent branch.
If the step is part of main dag, return None.
Args:
name (str): The name of the step.
Returns:
str: The name of the branch containing the step.
"""
dot_path = name.split('.')
if len(dot_path) == 1:
return None
# Ignore the step name
return '.'.join(dot_path[:-1])
def _get_parent_step(self, name: str) -> str: # pylint: disable=R0201
"""
Returns the step containing the step, useful when we have steps within a branch.
Returns None, if the step belongs to parent dag.
Args:
name (str): The name of the step to find the parent step it belongs to.
Returns:
str: The parent step the step belongs to, None if the step belongs to parent dag.
"""
dot_path = name.split('.')
if len(dot_path) == 1:
return None
# Ignore the branch.step_name
return '.'.join(dot_path[:-2])
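# Illustrative note, not part of the original extension: for a nested step name
# such as 'map_state.branch_a.do_work', _get_parent_branch() returns
# 'map_state.branch_a' and _get_parent_step() returns 'map_state'; a top-level
# name such as 'do_work' yields None from both helpers.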
def prepare_full_run_log(self, run_log: RunLog) -> RunLog:
"""
Populates the run log with the branches and steps.
In database mode, we fragment the run log into individual Run Logs and this method populates the
full run log by querying the underlying tables.
Args:
run_log (RunLog): The partial run log containing empty step logs
"""
run_id = run_log.run_id
run_log.parameters = self.get_parameters(run_id=run_id)
all_steps = self.get_from_db(run_id=run_id, attribute_type=self.STEPLOG_ATTRIBUTE_TYPE)
all_branches = self.get_from_db(run_id=run_id, attribute_type=self.BRANCHLOG_ATTRIBUTE_TYPE)
ordered_steps = OrderedDict()
for step in all_steps:
json_str = json.loads(step.attribute_value)
ordered_steps[step.attribute_key] = StepLog(**json_str)
ordered_branches = OrderedDict()
for branch in all_branches:
json_str = json.loads(branch.attribute_value)
ordered_branches[branch.attribute_key] = BranchLog(**json_str)
current_branch = None
for step_internal_name in ordered_steps:
current_branch = self._get_parent_branch(step_internal_name)
step_to_add_branch = self._get_parent_step(step_internal_name)
if not current_branch:
current_branch = run_log
else:
current_branch = ordered_branches[current_branch]
step_to_add_branch = ordered_steps[step_to_add_branch]
step_to_add_branch.branches[current_branch.internal_name] = current_branch
current_branch.steps[step_internal_name] = ordered_steps[step_internal_name]
def create_run_log(self, run_id, **kwargs):
"""
Creates a Run Log object by using the config
Logically the method should do the following:
* Creates a Run log
* Adds it to the db
* Return the log
"""
logger.info(f'{self.service_name} Creating a Run Log for : {run_id}')
run_log = RunLog(run_id, status=defaults.CREATED)
self.write_to_db(run_id=run_id,
attribute_key='run_log',
attribute_type=self.RUNLOG_ATTRIBUTE_TYPE,
attribute_value=json.dumps(run_log.to_dict(), ensure_ascii=True)) # pylint: disable=no-member
return run_log
def get_run_log_by_id(self, run_id, full=True, **kwargs):
"""
Retrieves a Run log from the database using the config and the run_id
Args:
run_id (str): The run_id of the run
Logically the method should:
* Returns the run_log defined by id from the data store defined by the config
Raises:
RunLogNotFoundError: If the run log for run_id is not found in the datastore
"""
try:
logger.info(f'{self.service_name} Getting a Run Log for : {run_id}')
records = self.get_from_db(run_id=run_id,
attribute_key='run_log',
attribute_type=self.RUNLOG_ATTRIBUTE_TYPE)
if not records:
raise exceptions.RunLogNotFoundError(run_id)
json_str = json.loads(records[0].attribute_value)
run_log = RunLog(**json_str)
if full:
self.prepare_full_run_log(run_log)
return run_log
except Exception as ex:
raise exceptions.RunLogNotFoundError(run_id) from ex
def put_run_log(self, run_log, **kwargs):
"""
Puts the Run Log in the database as defined by the config
Args:
run_log (RunLog): The Run log of the run
Logically the method should:
Puts the run_log into the database
"""
run_id = run_log.run_id
logger.info(f'{self.service_name} Putting the Run Log for : {run_id}')
self.update_db(run_id=run_id,
attribute_key='run_log',
attribute_type=self.RUNLOG_ATTRIBUTE_TYPE,
attribute_value=json.dumps(run_log.dict(), ensure_ascii=True)) # pylint: disable=no-member
def get_parameters(self, run_id, **kwargs):
"""
Get the parameters from the Run log defined by the run_id
Args:
run_id (str): The run_id of the run
In database mode the parameters are stored as individual records, so this
method queries them directly rather than loading the full run log.
Returns:
dict: A dictionary of the run_log parameters
"""
logger.info(f'{self.service_name} Getting parameters for : {run_id}')
records = self.get_from_db(run_id=run_id, attribute_type=self.PARAMETER_ATTRIBUTE_TYPE)
parameters = {}
for record in records:
parameters[record.attribute_key] = json.loads(record.attribute_value)
return parameters
def set_parameters(self, run_id, parameters, **kwargs):
"""
Update the parameters of the Run log with the given parameters.
<reponame>teixemf/netbox
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import gettext as _
from dcim.choices import *
from dcim.constants import *
from dcim.models import *
from tenancy.models import *
from extras.forms import CustomFieldModelFilterForm, LocalConfigContextFilterForm
from ipam.models import ASN
from tenancy.forms import ContactModelFilterForm, TenancyFilterForm
from utilities.forms import (
APISelectMultiple, add_blank_choice, ColorField, DynamicModelMultipleChoiceField, FilterForm, StaticSelect,
StaticSelectMultiple, TagFilterField, BOOLEAN_WITH_BLANK_CHOICES,
)
from wireless.choices import *
__all__ = (
'CableFilterForm',
'ConsoleConnectionFilterForm',
'ConsolePortFilterForm',
'ConsoleServerPortFilterForm',
'DeviceBayFilterForm',
'DeviceFilterForm',
'DeviceRoleFilterForm',
'DeviceTypeFilterForm',
'FrontPortFilterForm',
'InterfaceConnectionFilterForm',
'InterfaceFilterForm',
'InventoryItemFilterForm',
'LocationFilterForm',
'ManufacturerFilterForm',
'PlatformFilterForm',
'PowerConnectionFilterForm',
'PowerFeedFilterForm',
'PowerOutletFilterForm',
'PowerPanelFilterForm',
'PowerPortFilterForm',
'RackFilterForm',
'RackElevationFilterForm',
'RackReservationFilterForm',
'RackRoleFilterForm',
'RearPortFilterForm',
'RegionFilterForm',
'SiteFilterForm',
'SiteGroupFilterForm',
'VirtualChassisFilterForm',
)
class DeviceComponentFilterForm(CustomFieldModelFilterForm):
name = forms.CharField(
required=False
)
label = forms.CharField(
required=False
)
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id',
'group_id': '$site_group_id',
},
label=_('Site')
)
location_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.all(),
required=False,
query_params={
'site_id': '$site_id',
},
label=_('Location')
)
virtual_chassis_id = DynamicModelMultipleChoiceField(
queryset=VirtualChassis.objects.all(),
required=False,
label=_('Virtual Chassis')
)
device_id = DynamicModelMultipleChoiceField(
queryset=Device.objects.all(),
required=False,
query_params={
'site_id': '$site_id',
'location_id': '$location_id',
'virtual_chassis_id': '$virtual_chassis_id'
},
label=_('Device')
)
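# Informal note on the filter chaining above: the '$field' values in
# query_params reference other fields on this form, so (for example) choosing
# a site narrows the location choices, and site/location/virtual chassis
# together narrow the device choices. This describes the intent of the
# declarations above, not an additional API.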
class RegionFilterForm(ContactModelFilterForm, CustomFieldModelFilterForm):
model = Region
field_groups = [
['q', 'tag'],
['parent_id'],
['contact', 'contact_role'],
]
parent_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Parent region')
)
tag = TagFilterField(model)
class SiteGroupFilterForm(ContactModelFilterForm, CustomFieldModelFilterForm):
model = SiteGroup
field_groups = [
['q', 'tag'],
['parent_id'],
['contact', 'contact_role'],
]
parent_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Parent group')
)
tag = TagFilterField(model)
class SiteFilterForm(TenancyFilterForm, ContactModelFilterForm, CustomFieldModelFilterForm):
model = Site
field_groups = [
['q', 'tag'],
['status', 'region_id', 'group_id'],
['tenant_group_id', 'tenant_id'],
['asn_id'],
['contact', 'contact_role'],
]
status = forms.MultipleChoiceField(
choices=SiteStatusChoices,
required=False,
widget=StaticSelectMultiple(),
)
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
asn_id = DynamicModelMultipleChoiceField(
queryset=ASN.objects.all(),
required=False,
label=_('ASNs')
)
tag = TagFilterField(model)
class LocationFilterForm(TenancyFilterForm, ContactModelFilterForm, CustomFieldModelFilterForm):
model = Location
field_groups = [
['q', 'tag'],
['region_id', 'site_group_id', 'site_id', 'parent_id'],
['tenant_group_id', 'tenant_id'],
['contact', 'contact_role'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id',
'group_id': '$site_group_id',
},
label=_('Site')
)
parent_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.all(),
required=False,
query_params={
'region_id': '$region_id',
'site_id': '$site_id',
},
label=_('Parent')
)
tag = TagFilterField(model)
class RackRoleFilterForm(CustomFieldModelFilterForm):
model = RackRole
tag = TagFilterField(model)
class RackFilterForm(TenancyFilterForm, ContactModelFilterForm, CustomFieldModelFilterForm):
model = Rack
field_groups = [
['q', 'tag'],
['region_id', 'site_id', 'location_id'],
['status', 'role_id'],
['type', 'width', 'serial', 'asset_tag'],
['tenant_group_id', 'tenant_id'],
['contact', 'contact_role']
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id'
},
label=_('Site')
)
location_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.all(),
required=False,
null_option='None',
query_params={
'site_id': '$site_id'
},
label=_('Location')
)
status = forms.MultipleChoiceField(
choices=RackStatusChoices,
required=False,
widget=StaticSelectMultiple()
)
type = forms.MultipleChoiceField(
choices=RackTypeChoices,
required=False,
widget=StaticSelectMultiple()
)
width = forms.MultipleChoiceField(
choices=RackWidthChoices,
required=False,
widget=StaticSelectMultiple()
)
role_id = DynamicModelMultipleChoiceField(
queryset=RackRole.objects.all(),
required=False,
null_option='None',
label=_('Role')
)
serial = forms.CharField(
required=False
)
asset_tag = forms.CharField(
required=False
)
tag = TagFilterField(model)
class RackElevationFilterForm(RackFilterForm):
id = DynamicModelMultipleChoiceField(
queryset=Rack.objects.all(),
label=_('Rack'),
required=False,
query_params={
'site_id': '$site_id',
'location_id': '$location_id',
}
)
class RackReservationFilterForm(TenancyFilterForm, CustomFieldModelFilterForm):
model = RackReservation
field_groups = [
['q', 'tag'],
['user_id'],
['region_id', 'site_id', 'location_id'],
['tenant_group_id', 'tenant_id'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id'
},
label=_('Site')
)
location_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.prefetch_related('site'),
required=False,
label=_('Location'),
null_option='None'
)
user_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
tag = TagFilterField(model)
class ManufacturerFilterForm(ContactModelFilterForm, CustomFieldModelFilterForm):
model = Manufacturer
field_groups = [
['q', 'tag'],
['contact', 'contact_role'],
]
tag = TagFilterField(model)
class DeviceTypeFilterForm(CustomFieldModelFilterForm):
model = DeviceType
field_groups = [
['q', 'tag'],
['manufacturer_id', 'subdevice_role', 'airflow'],
['console_ports', 'console_server_ports', 'power_ports', 'power_outlets', 'interfaces', 'pass_through_ports'],
]
manufacturer_id = DynamicModelMultipleChoiceField(
queryset=Manufacturer.objects.all(),
required=False,
label=_('Manufacturer')
)
subdevice_role = forms.MultipleChoiceField(
choices=add_blank_choice(SubdeviceRoleChoices),
required=False,
widget=StaticSelectMultiple()
)
airflow = forms.MultipleChoiceField(
choices=add_blank_choice(DeviceAirflowChoices),
required=False,
widget=StaticSelectMultiple()
)
console_ports = forms.NullBooleanField(
required=False,
label='Has console ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
console_server_ports = forms.NullBooleanField(
required=False,
label='Has console server ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
power_ports = forms.NullBooleanField(
required=False,
label='Has power ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
power_outlets = forms.NullBooleanField(
required=False,
label='Has power outlets',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
interfaces = forms.NullBooleanField(
required=False,
label='Has interfaces',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
pass_through_ports = forms.NullBooleanField(
required=False,
label='Has pass-through ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
tag = TagFilterField(model)
class DeviceRoleFilterForm(CustomFieldModelFilterForm):
model = DeviceRole
tag = TagFilterField(model)
class PlatformFilterForm(CustomFieldModelFilterForm):
model = Platform
manufacturer_id = DynamicModelMultipleChoiceField(
queryset=Manufacturer.objects.all(),
required=False,
label=_('Manufacturer')
)
tag = TagFilterField(model)
class DeviceFilterForm(LocalConfigContextFilterForm, TenancyFilterForm, ContactModelFilterForm, CustomFieldModelFilterForm):
model = Device
field_groups = [
['q', 'tag'],
['region_id', 'site_group_id', 'site_id', 'location_id', 'rack_id'],
['status', 'role_id', 'airflow', 'serial', 'asset_tag', 'mac_address'],
['manufacturer_id', 'device_type_id', 'platform_id'],
['tenant_group_id', 'tenant_id'],
[
'has_primary_ip', 'virtual_chassis_member', 'console_ports', 'console_server_ports', 'power_ports',
'power_outlets', 'interfaces', 'pass_through_ports', 'local_context_data',
],
['contact', 'contact_role'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id',
'group_id': '$site_group_id',
},
label=_('Site')
)
location_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.all(),
required=False,
null_option='None',
query_params={
'site_id': '$site_id'
},
label=_('Location')
)
rack_id = DynamicModelMultipleChoiceField(
queryset=Rack.objects.all(),
required=False,
null_option='None',
query_params={
'site_id': '$site_id',
'location_id': '$location_id',
},
label=_('Rack')
)
role_id = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False,
label=_('Role')
)
manufacturer_id = DynamicModelMultipleChoiceField(
queryset=Manufacturer.objects.all(),
required=False,
label=_('Manufacturer')
)
device_type_id = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False,
query_params={
'manufacturer_id': '$manufacturer_id'
},
label=_('Model')
)
platform_id = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False,
null_option='None',
label=_('Platform')
)
status = forms.MultipleChoiceField(
choices=DeviceStatusChoices,
required=False,
widget=StaticSelectMultiple()
)
airflow = forms.MultipleChoiceField(
choices=add_blank_choice(DeviceAirflowChoices),
required=False,
widget=StaticSelectMultiple()
)
serial = forms.CharField(
required=False
)
asset_tag = forms.CharField(
required=False
)
mac_address = forms.CharField(
required=False,
label='MAC address'
)
has_primary_ip = forms.NullBooleanField(
required=False,
label='Has a primary IP',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
virtual_chassis_member = forms.NullBooleanField(
required=False,
label='Virtual chassis member',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
console_ports = forms.NullBooleanField(
required=False,
label='Has console ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
console_server_ports = forms.NullBooleanField(
required=False,
label='Has console server ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
power_ports = forms.NullBooleanField(
required=False,
label='Has power ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
power_outlets = forms.NullBooleanField(
required=False,
label='Has power outlets',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
interfaces = forms.NullBooleanField(
required=False,
label='Has interfaces',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
pass_through_ports = forms.NullBooleanField(
required=False,
label='Has pass-through ports',
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
tag = TagFilterField(model)
class VirtualChassisFilterForm(TenancyFilterForm, CustomFieldModelFilterForm):
model = VirtualChassis
field_groups = [
['q', 'tag'],
['region_id', 'site_group_id', 'site_id'],
['tenant_group_id', 'tenant_id'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id',
'group_id': '$site_group_id',
},
label=_('Site')
)
tag = TagFilterField(model)
class CableFilterForm(TenancyFilterForm, CustomFieldModelFilterForm):
model = Cable
field_groups = [
['q', 'tag'],
['site_id', 'rack_id', 'device_id'],
['type', 'status', 'color', 'length', 'length_unit'],
['tenant_group_id', 'tenant_id'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id'
},
label=_('Site')
)
rack_id = DynamicModelMultipleChoiceField(
queryset=Rack.objects.all(),
required=False,
label=_('Rack'),
null_option='None',
query_params={
'site_id': '$site_id'
}
)
device_id = DynamicModelMultipleChoiceField(
queryset=Device.objects.all(),
required=False,
query_params={
'site_id': '$site_id',
'tenant_id': '$tenant_id',
'rack_id': '$rack_id',
},
label=_('Device')
)
type = forms.MultipleChoiceField(
choices=add_blank_choice(CableTypeChoices),
required=False,
widget=StaticSelect()
)
status = forms.ChoiceField(
required=False,
choices=add_blank_choice(LinkStatusChoices),
widget=StaticSelect()
)
color = ColorField(
required=False
)
length = forms.IntegerField(
required=False
)
length_unit = forms.ChoiceField(
choices=add_blank_choice(CableLengthUnitChoices),
required=False
)
tag = TagFilterField(model)
class PowerPanelFilterForm(ContactModelFilterForm, CustomFieldModelFilterForm):
model = PowerPanel
field_groups = [
['q', 'tag'],
['region_id', 'site_group_id', 'site_id', 'location_id'],
['contact', 'contact_role'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id',
'group_id': '$site_group_id',
},
label=_('Site')
)
location_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.all(),
required=False,
null_option='None',
query_params={
'site_id': '$site_id'
},
label=_('Location')
)
tag = TagFilterField(model)
class PowerFeedFilterForm(CustomFieldModelFilterForm):
model = PowerFeed
field_groups = [
['q', 'tag'],
['region_id', 'site_group_id', 'site_id'],
['power_panel_id', 'rack_id'],
['status', 'type', 'supply', 'phase', 'voltage', 'amperage', 'max_utilization'],
]
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Region')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site group')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
query_params={
'region_id': '$region_id'
},
label=_('Site')
)
power_panel_id = DynamicModelMultipleChoiceField(
queryset=PowerPanel.objects.all(),
required=False,
null_option='None',
query_params={
'site_id': '$site_id'
},
label=_('Power panel')
)
rack_id = DynamicModelMultipleChoiceField(
queryset=Rack.objects.all(),
required=False,
null_option='None',
query_params={
'site_id': '$site_id'
},
label=_('Rack')
)
status = forms.MultipleChoiceField(
choices=PowerFeedStatusChoices,
required=False,
widget=StaticSelectMultiple()
)
type = forms.ChoiceField(
choices=add_blank_choice(PowerFeedTypeChoices),
required=False,
widget=StaticSelect()
)
supply = forms.ChoiceField(
choices=add_blank_choice(PowerFeedSupplyChoices),
required=False,
widget=StaticSelect()
)
phase = forms.ChoiceField(
choices=add_blank_choice(PowerFeedPhaseChoices),
required=False,
widget=StaticSelect()
)
voltage = forms.IntegerField(
required=False
)
amperage = forms.IntegerField(
required=False
)
max_utilization = forms.IntegerField(
required=False
)
tag = TagFilterField(model)
#
# Device components
#
class ConsolePortFilterForm(DeviceComponentFilterForm):
model = ConsolePort
field_groups = [
['q', 'tag'],
['name', 'label', 'type', 'speed'],
['region_id', 'site_group_id', 'site_id', 'location_id', 'virtual_chassis_id', 'device_id'],
]
type = forms.MultipleChoiceField(
choices=ConsolePortTypeChoices,
required=False,
widget=StaticSelectMultiple()
)
speed = forms.MultipleChoiceField(
choices=ConsolePortSpeedChoices,
required=False,
widget=StaticSelectMultiple()
)
tag = TagFilterField(model)
class ConsoleServerPortFilterForm(DeviceComponentFilterForm):
model = ConsoleServerPort
field_groups = [
['q', 'tag'],
['name', 'label', 'type', 'speed'],
['region_id', 'site_group_id', 'site_id', 'location_id', 'virtual_chassis_id', 'device_id'],
]
type = forms.MultipleChoiceField(
choices=ConsolePortTypeChoices,
| |
<reponame>centrologic/django-codenerix-geodata
# -*- coding: utf-8 -*-
#
# django-codenerix-geodata
#
# Copyright 2017 Centrologic Computational Logistic Center S.L.
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import operator
from functools import reduce
from django.db.models import Q
from django.conf import settings
from django.utils.translation import ugettext as _
from codenerix.multiforms import MultiForm
from codenerix.views import GenList, GenCreate, GenCreateModal, GenUpdate, GenUpdateModal, GenDelete, GenForeignKey
from .models import Continent, Country, Region, Province, TimeZone, City, MODELS
from .forms import ContinentForm, CountryForm, RegionForm, ProvinceForm, TimeZoneForm, CityForm
# forms for multiforms: dynamically import the per-language GeoName models and
# TextForm classes for every model/language pair and register them in formsfull
formsfull = {}
for info in MODELS:
field = info[0]
model = info[1]
formsfull[model] = [(None, None, None)]
for lang_code in settings.LANGUAGES_DATABASES:
query = 'from .models import {}GeoName{}\n'.format(model, lang_code)
query += 'from .forms import {}TextForm{}'.format(model, lang_code)
exec(query)
formsfull[model].append((eval('{}TextForm{}'.format(model, lang_code.upper())), field, None))
class TranslatedMixin(object):
@property
def lang(self):
if hasattr(self, 'request'):
for x in settings.LANGUAGES:
if x[0] == self.request.LANGUAGE_CODE:
return self.request.LANGUAGE_CODE.lower()
return settings.LANGUAGES[0][0].lower()
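# Informal sketch: self.lang is a lowercase language code (for example 'en'),
# used below to build per-language ORM lookups, e.g.
#   '{}__name'.format(self.lang)                          # -> 'en__name'
#   Q(**{"{}__name__icontains".format(self.lang): text})  # -> Q(en__name__icontains=text)
# 'en' is only an example; the real codes come from settings.LANGUAGES.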
# ###########################################
# Continent
class GenContinentUrl(object):
ws_entry_point = '{}/continents'.format(settings.CDNX_GEODATA_URL)
class ContinentList(TranslatedMixin, GenContinentUrl, GenList):
model = Continent
linkadd = False
show_details = False
public = True
def __fields__(self, info):
fields = self.model().__fields__(info)
fields.append(
('{}__name'.format(self.lang), _('Name')),
)
return fields
def __searchQ__(self, info, text):
filters = self.model().__searchQ__(info, text)
filters['name'] = Q(**{"{}__name__icontains".format(self.lang): text})
return filters
def __searchF__(self, info):
def f(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
filters = self.model().__searchF__(info)
filters['{}__name'.format(self.lang)] = (_('Name'), f, 'input')
return filters
def dispatch(self, request, *args, **kwargs):
self.order_by = ['{}__name'.format(self.lang)]
return super(ContinentList, self).dispatch(request, *args, **kwargs)
class ContinentCreate(GenContinentUrl, MultiForm, GenCreate):
model = Continent
form_class = ContinentForm
forms = formsfull['Continent']
class ContinentCreateModal(GenCreateModal, ContinentCreate):
pass
class ContinentUpdate(GenContinentUrl, MultiForm, GenUpdate):
model = Continent
form_class = ContinentForm
forms = formsfull['Continent']
class ContinentUpdateModal(GenUpdateModal, ContinentUpdate):
pass
class ContinentDelete(GenContinentUrl, GenDelete):
model = Continent
# ###########################################
# Country
class GenCountryUrl(object):
ws_entry_point = '{}/countries'.format(settings.CDNX_GEODATA_URL)
class CountryList(TranslatedMixin, GenCountryUrl, GenList):
model = Country
linkadd = False
show_details = False
public = True
def __limitQ__(self, info):
result = {}
try:
params = ast.literal_eval(info.request.GET.get('json'))
except ValueError:
params = {}
continent = int(params.get('continent', 0))
if continent:
result['continent_limit'] = Q(continent__pk=continent)
return result
def __fields__(self, info):
return [
('code', _('Code')),
('{}__name'.format(self.lang), _('Name')),
('continent__code', _('Code continent')),
('continent__{}__name'.format(self.lang), _('Name continent')),
]
def __searchQ__(self, info, text):
filters = self.model().__searchQ__(info, text)
filters['name'] = Q(**{"{}__name__icontains".format(self.lang): text})
filters['continent_code'] = Q(**{"continent__code": text})
filters['continent_name'] = Q(**{"continent__{}__name__icontains".format(self.lang): text})
return filters
def __searchF__(self, info):
def f(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
def fc(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"continent__{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
filters = self.model().__searchF__(info)
filters['{}__name'.format(self.lang)] = (_('Name'), f, 'input')
filters['continent__code'] = (_('Code continent'), lambda x: Q(continent__code__icontains=x), 'input')
filters['continent__{}__name'.format(self.lang)] = (_('Continent name'), fc, 'input')
return filters
def dispatch(self, request, *args, **kwargs):
self.order_by = ['{}__name'.format(self.lang)]
return super(CountryList, self).dispatch(request, *args, **kwargs)
class CountryCreate(GenCountryUrl, MultiForm, GenCreate):
model = Country
form_class = CountryForm
forms = formsfull['Country']
class CountryCreateModal(GenCreateModal, CountryCreate):
pass
class CountryUpdate(GenCountryUrl, MultiForm, GenUpdate):
model = Country
form_class = CountryForm
forms = formsfull['Country']
class CountryUpdateModal(GenUpdateModal, CountryUpdate):
pass
class CountryDelete(GenCountryUrl, GenDelete):
model = Country
class CountryForeign(TranslatedMixin, GenCountryUrl, GenForeignKey):
model = Country
label = "{<LANGUAGE_CODE>__name}"
public = True
def get_foreign(self, queryset, search, filters):
# Filter with search string
qsobject = Q(code__istartswith=search)
for lang in settings.LANGUAGES_DATABASES:
qsobject |= Q(**{"{}__name__istartswith".format(lang.lower()): search})
qs = queryset.filter(qsobject).order_by("{}__name".format(self.lang))
return qs[:settings.LIMIT_FOREIGNKEY]
# ###########################################
# Regions
class GenRegionUrl(object):
ws_entry_point = '{}/regions'.format(settings.CDNX_GEODATA_URL)
class RegionList(TranslatedMixin, GenRegionUrl, GenList):
model = Region
linkadd = False
show_details = False
public = True
def __limitQ__(self, info):
result = {}
try:
params = ast.literal_eval(info.request.GET.get('json'))
except ValueError:
params = {}
continent = int(params.get('continent', 0))
country = int(params.get('country', 0))
if continent:
result['continent_limit'] = Q(country__continent__pk=continent)
if country:
result['country_limit'] = Q(country__pk=country)
return result
def __fields__(self, info):
return [
('code', _('Code')),
('{}__name'.format(self.lang), _('Name')),
('country__code', _('Code country')),
('country__{}__name'.format(self.lang), _('Name country')),
('country__continent__code', _('Code continent')),
('country__continent__{}__name'.format(self.lang), _('Name continent')),
]
def __searchQ__(self, info, text):
filters = self.model().__searchQ__(info, text)
filters['name'] = Q(**{"{}__name__icontains".format(self.lang): text})
filters['country_code'] = Q(**{'country__code': text})
filters['country_name'] = Q(**{'country__{}__name__icontains'.format(self.lang): text})
filters['continent_code'] = Q(**{"country__continent__code": text})
filters['continent_name'] = Q(**{"country__continent__{}__name__icontains".format(self.lang): text})
return filters
def __searchF__(self, info):
def f(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
def fco(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"country__continent__{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
def fcu(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"country__{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
filters = self.model().__searchF__(info)
filters['{}__name'.format(self.lang)] = (_('Name'), f, 'input')
filters['country__code'] = (_('Code country'), lambda x: Q(country__code__icontains=x), 'input')
filters['country__{}__name'.format(self.lang)] = (_('Name country'), fcu, 'input')
filters['country__continent__code'] = (_('Code continent'), lambda x: Q(country__continent__code__icontains=x), 'input')
filters['country__continent__{}__name'.format(self.lang)] = (_('Name continent'), fco, 'input')
return filters
def dispatch(self, request, *args, **kwargs):
self.order_by = ['{}__name'.format(self.lang)]
return super(RegionList, self).dispatch(request, *args, **kwargs)
class RegionCreate(GenRegionUrl, MultiForm, GenCreate):
model = Region
form_class = RegionForm
forms = formsfull['Region']
class RegionCreateModal(GenCreateModal, RegionCreate):
pass
class RegionUpdate(GenRegionUrl, MultiForm, GenUpdate):
model = Region
form_class = RegionForm
forms = formsfull['Region']
class RegionUpdateModal(GenUpdateModal, RegionUpdate):
pass
class RegionDelete(GenRegionUrl, GenDelete):
model = Region
class RegionForeign(TranslatedMixin, GenRegionUrl, GenForeignKey):
model = Region
label = "{<LANGUAGE_CODE>__name}"
public = True
def get_foreign(self, queryset, search, filters):
# Filter with search string
qsobject = Q(code__istartswith=search)
for lang in settings.LANGUAGES_DATABASES:
qsobject |= Q(**{"{}__name__istartswith".format(lang.lower()): search})
qs = queryset.filter(qsobject)
country = filters.get('country', None)
if country:
qs = qs.filter(country__pk=country)
qs = qs.order_by("{}__name".format(self.lang))
return qs[:settings.LIMIT_FOREIGNKEY]
# ###########################################
# Provinces
class GenProvinceUrl(object):
ws_entry_point = '{}/provinces'.format(settings.CDNX_GEODATA_URL)
class ProvinceList(TranslatedMixin, GenProvinceUrl, GenList):
model = Province
linkadd = False
show_details = False
public = True
def __limitQ__(self, info):
result = {}
try:
params = ast.literal_eval(info.request.GET.get('json'))
except ValueError:
params = {}
continent = int(params.get('continent', 0))
country = int(params.get('country', 0))
region = int(params.get('region', 0))
if continent:
result['continent_limit'] = Q(region__country__continent__pk=continent)
if country:
result['country_limit'] = Q(region__country__pk=country)
if region:
result['region_limit'] = Q(region__pk=region)
return result
def __fields__(self, info):
return [
('code', _('Code')),
('{}__name'.format(self.lang), _('Name')),
('region__code', _('Code region')),
('region__{}__name'.format(self.lang), _('Name region')),
('region__country__code', _('Code country')),
('region__country__{}__name'.format(self.lang), _('Name country')),
('region__country__continent__code', _('Code continent')),
('region__country__continent__{}__name'.format(self.lang), _('Name continent')),
]
def __searchQ__(self, info, text):
filters = self.model().__searchQ__(info, text)
filters['name'] = Q(**{"{}__name__icontains".format(self.lang): text})
filters['region_code'] = Q(**{'region__code': text})
filters['region_name'] = Q(**{'region__{}__name__icontains'.format(self.lang): text})
filters['country_code'] = Q(**{'region__country__code': text})
filters['country_name'] = Q(**{'region__country__{}__name__icontains'.format(self.lang): text})
filters['continent_code'] = Q(**{"region__country__continent__code": text})
filters['continent_name'] = Q(**{"region__country__continent__{}__name__icontains".format(self.lang): text})
return filters
def __searchF__(self, info):
def f(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
def fr(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"region__{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
def fco(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"region__country__continent__{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
def fcu(x):
qsobject = []
for lang in settings.LANGUAGES_DATABASES:
qsobject.append(Q(**{"region__country__{}__name__icontains".format(lang.lower()): x}))
return reduce(operator.or_, qsobject)
filters = self.model().__searchF__(info)
filters['{}__name'.format(self.lang)] = (_('Name'), f, 'input')
filters['region__code'] = (_('Code region'), lambda x: Q(region__code__icontains=x), 'input')
filters['region__{}__name'.format(self.lang)] = (_('Name region'), fr, 'input')
filters['region__country__code'] = (_('Code country'), lambda x: Q(region__country__code__icontains=x), 'input')
filters['region__country__{}__name'.format(self.lang)] = (_('Name country'), fcu, 'input')
filters['region__country__continent__code'] = (_('Code continent'), lambda x: Q(region__country__continent__code__icontains=x), 'input')
filters['region__country__continent__{}__name'.format(self.lang)] = (_('Name continent'), fco, 'input')
return filters
def dispatch(self, request, *args, **kwargs):
self.order_by = ['{}__name'.format(self.lang)]
return super(ProvinceList, self).dispatch(request, *args, **kwargs)
class ProvinceCreate(GenProvinceUrl, MultiForm, GenCreate):
model = Province
form_class = ProvinceForm
forms = formsfull['Province']
class ProvinceCreateModal(GenCreateModal, ProvinceCreate):
pass
class ProvinceUpdate(GenProvinceUrl, MultiForm, GenUpdate):
model = Province
form_class = ProvinceForm
forms = formsfull['Province']
class ProvinceUpdateModal(GenUpdateModal, ProvinceUpdate):
pass
class ProvinceDelete(GenProvinceUrl, GenDelete):
model = Province
class ProvinceForeign(TranslatedMixin, GenProvinceUrl, GenForeignKey):
model = Province
label = "{<LANGUAGE_CODE>__name}"
public = True
def get_foreign(self, queryset, search, filters):
# Filter with search string
qsobject = Q(code__istartswith=search)
for lang in settings.LANGUAGES_DATABASES:
qsobject |= Q(**{"{}__name__istartswith".format(lang.lower()): search})
qs = queryset.filter(qsobject)
region = filters.get('region', None)
if region:
qs = qs.filter(region__pk=region)
qs = qs.order_by("{}__name".format(self.lang))
return qs[:settings.LIMIT_FOREIGNKEY]
# ###########################################
# TimeZone
class GenTimeZoneUrl(object):
ws_entry_point = '{}/timezones'.format(settings.CDNX_GEODATA_URL)
class TimeZoneList(GenTimeZoneUrl, GenList):
model = TimeZone
linkadd = False
show_details = False
public = True
def dispatch(self, request, *args, **kwargs):
self.order_by = ['name']
return super(TimeZoneList, self).dispatch(request, *args, **kwargs)
class TimeZoneCreate(GenTimeZoneUrl, GenCreate):
model = TimeZone
form_class = TimeZoneForm
class TimeZoneCreateModal(GenCreateModal, TimeZoneCreate):
pass
class TimeZoneUpdate(GenTimeZoneUrl, GenUpdate):
model = TimeZone
form_class = TimeZoneForm
class TimeZoneUpdateModal(GenUpdateModal, TimeZoneUpdate):
pass
class TimeZoneDelete(GenTimeZoneUrl, GenDelete):
model = TimeZone
# ###########################################
# City
class GenCityUrl(object):
ws_entry_point = '{}/cities'.format(settings.CDNX_GEODATA_URL)
class CityList(TranslatedMixin, GenCityUrl, GenList):
model = City
linkadd = False
show_details = False
public = True
def __limitQ__(self, info):
result = {}
try:
params = ast.literal_eval(info.request.GET.get('json'))
except ValueError:
params = {}
continent = int(params.get('continent', 0))
country = int(params.get('country', 0))
<filename>code/examples/classifier_compression/sinreq_v2_svhn_runcode/networks/alexnet.py
import tensorflow as tf
from .helper import *
def quantize_acti(x, k):
mini = tf.reduce_min(x)
maxi = tf.reduce_max(x)
x = (x - mini)/(maxi - mini)
G = tf.get_default_graph()
n = float(2**k - 1)
with G.gradient_override_map({"Round": "Identity"}):
return tf.round(x * n) / n
def quantize_weights(x, k):
#mini = tf.reduce_min(x)
#maxi = tf.reduce_max(x)
#x = (x - mini)/(maxi - mini)
G = tf.get_default_graph()
n = float(2**k - 1)
with G.gradient_override_map({"Round": "Identity"}):
return tf.round(x * n) / n
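# Worked example of the rounding above (informal; assumes the input is already
# in [0, 1], which quantize_weights itself does not enforce):
#   k = 2  ->  n = 2**2 - 1 = 3.0
#   x = 0.40  ->  round(0.40 * 3.0) / 3.0 = 0.3333...
# The gradient_override_map makes tf.round act as the identity in the backward
# pass, i.e. a straight-through estimator.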
def alexnet_noisy(input_node, netparams, err_mean, err_stddev, train_vars):
weights_noisy, biases_noisy, err_w, err_b = add_noise(netparams['weights'], netparams['biases'], err_mean, err_stddev, train_vars)
mean, variance, scale, offset = netparams['mean'], netparams['variance'], netparams['scale'], netparams['offset']
err_lyr = {}
layers_err = {}
data_spec = get_data_spec('alexnet')
err_lyr['input'] = tf.get_variable(name='input_lyr_err', shape=(1, data_spec.crop_size, data_spec.crop_size, data_spec.channels), initializer=tf.random_normal_initializer(mean=err_mean[0], stddev=err_stddev[0]), trainable=train_vars[0])
input_node_noisy = tf.add(input_node, err_lyr['input'])
conv1 = conv(input_node_noisy, weights_noisy['conv1'], biases_noisy['conv1'], 4, 4, padding='VALID')
err_lyr['conv1'] = tf.get_variable(name='conv1_lyr_err', shape=conv1.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['conv1'] = tf.add(conv1, err_lyr['conv1'])
norm1 = lrn(layers_err['conv1'], 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
conv2 = conv(pool1, weights_noisy['conv2'], biases_noisy['conv2'], 1, 1, group=2)
err_lyr['conv2'] = tf.get_variable(name='conv2_lyr_err', shape=conv2.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['conv2'] = tf.add(conv2, err_lyr['conv2'])
norm2 = lrn(layers_err['conv2'], 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
conv3 = conv(pool2, weights_noisy['conv3'], biases_noisy['conv3'], 1, 1)
err_lyr['conv3'] = tf.get_variable(name='conv3_lyr_err', shape=conv3.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['conv3'] = tf.add(conv3, err_lyr['conv3'])
conv4 = conv(layers_err['conv3'], weights_noisy['conv4'], biases_noisy['conv4'], 1, 1, group=2)
err_lyr['conv4'] = tf.get_variable(name='conv4_lyr_err', shape=conv4.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['conv4'] = tf.add(conv4, err_lyr['conv4'])
conv5 = conv(layers_err['conv4'], weights_noisy['conv5'], biases_noisy['conv5'], 1, 1, group=2)
err_lyr['conv5'] = tf.get_variable(name='conv5_lyr_err', shape=conv5.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['conv5'] = tf.add(conv5, err_lyr['conv5'])
pool5 = max_pool(layers_err['conv5'], 3, 3, 2, 2, padding='VALID')
fc6 = fc(pool5, weights_noisy['fc6'], biases_noisy['fc6'])
err_lyr['fc6'] = tf.get_variable(name='fc6_lyr_err', shape=fc6.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['fc6'] = tf.add(fc6, err_lyr['fc6'])
fc7 = fc(layers_err['fc6'], weights_noisy['fc7'], biases_noisy['fc7'])
err_lyr['fc7'] = tf.get_variable(name='fc7_lyr_err', shape=fc7.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['fc7'] = tf.add(fc7, err_lyr['fc7'])
fc8 = fc(layers_err['fc7'], weights_noisy['fc8'], biases_noisy['fc8'], relu=False)
err_lyr['fc8'] = tf.get_variable(name='fc8_lyr_err', shape=fc8.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
layers_err['fc8'] = tf.add(fc8, err_lyr['fc8'])
return layers_err['fc8'], err_w, err_b, err_lyr
"""
def alexnet_spoilded(input_node, netparams):
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 5, 5, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
conv2 = conv(pool1, weights['conv2'], biases['conv2'], 1, 1, group=2)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
conv3 = conv(pool2, weights['conv3'], biases['conv3'], 1, 1)
conv4 = conv(conv3, weights['conv4'], biases['conv4'], 1, 1, group=2)
conv5 = conv(conv4, weights['conv5'], biases['conv5'], 1, 1, group=2)
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')
fc6 = fc(pool5, weights['fc6'], biases['fc6'])
fc7 = fc(fc6, weights['fc7'], biases['fc7'])
fc8 = fc(fc7, weights['fc8'], biases['fc8'], relu=False)
return fc8
"""
def alexnet(input_node, netparams):
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 4, 4, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
conv2 = conv(pool1, weights['conv2'], biases['conv2'], 1, 1, group=2)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
conv3 = conv(pool2, weights['conv3'], biases['conv3'], 1, 1)
conv4 = conv(conv3, weights['conv4'], biases['conv4'], 1, 1, group=2)
conv5 = conv(conv4, weights['conv5'], biases['conv5'], 1, 1, group=2)
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')
fc6 = fc(pool5, weights['fc6'], biases['fc6'])
fc7 = fc(fc6, weights['fc7'], biases['fc7'])
fc8 = fc(fc7, weights['fc8'], biases['fc8'], relu=False)
return fc8
def svhn_net(input_node, netparams):
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('svhn_net')
conv1 = conv(input_node, weights['hidden1'], biases['hidden1'], 4, 4, padding='VALID', relu=False)
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 2, 2, 2, 2, padding='VALID')
conv2 = conv(pool1, weights['hidden2'], biases['hidden2'], 5, 5, group=2, relu=True)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 2, 2, 1, 1, padding='VALID')
conv3 = conv(pool2, weights['hidden3'], biases['hidden3'], 5, 5, relu=True)
norm3 = lrn(conv3, 2, 1.99999994948e-05, 0.75)
pool3 = max_pool(norm3, 2, 2, 2, 2, padding='VALID')
conv4 = conv(pool3, weights['hidden4'], biases['hidden4'], 5, 5, relu=True)
norm4 = lrn(conv4, 2, 1.99999994948e-05, 0.75)
pool4 = max_pool(norm4, 2, 2, 1, 1, padding='VALID')
conv5 = conv(pool4, weights['hidden5'], biases['hidden5'], 5, 5, relu=True)
norm5 = lrn(conv5, 2, 1.99999994948e-05, 0.75)
pool5 = max_pool(norm5, 2, 2, 2, 2, padding='VALID')
conv6 = conv(pool5, weights['hidden6'], biases['hidden6'], 5, 5, relu=True)
norm6 = lrn(conv6, 2, 1.99999994948e-05, 0.75)
pool6 = max_pool(norm6, 2, 2, 1, 1, padding='VALID')
conv7 = conv(pool6, weights['hidden7'], biases['hidden7'], 5, 5, relu=True)
norm7 = lrn(conv7, 2, 1.99999994948e-05, 0.75)
pool7 = max_pool(norm7, 2, 2, 2, 2, padding='VALID')
conv8 = conv(pool7, weights['hidden8'], biases['hidden8'], 5, 5, relu=True)
norm8 = lrn(conv8, 2, 1.99999994948e-05, 0.75)
pool8 = max_pool(norm8, 2, 2, 1, 1, padding='VALID')
flatten = tf.reshape(pool8, [-1, 4 * 4 * 192])
hidden9 = fc(flatten, weights['hidden9'], biases['hidden9'])
hidden10 = fc(hidden9, weights['hidden10'], biases['hidden10'])
length = fc(hidden10, weights['length'], biases['length'])
digit1 = fc(hidden10, weights['digit1'], biases['digit1'])
digit2 = fc(hidden10, weights['digit2'], biases['digit2'])
digit3 = fc(hidden10, weights['digit3'], biases['digit3'])
digit4 = fc(hidden10, weights['digit4'], biases['digit4'])
digit5 = fc(hidden10, weights['digit5'], biases['digit5'])
length_logits, digits_logits = length, tf.stack([digit1, digit2, digit3, digit4, digit5], axis=1)
return length_logits, digits_logits
def alexnet_q(input_node, netparams, qbits):
# qbits = [16, 8, 8, 4, 8, 8, 8, 16]
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 4, 4, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
k = qbits[1]
weights_conv2_q = quantize_weights(weights['conv2'], k)
conv2 = conv(pool1, weights_conv2_q, biases['conv2'], 1, 1, group=2)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
k = qbits[2]
weights_conv3_q = quantize_weights(weights['conv3'], k)
conv3 = conv(pool2, weights_conv3_q, biases['conv3'], 1, 1)
k = qbits[3]
weights_conv4_q = quantize_weights(weights['conv4'], k)
conv4 = conv(conv3, weights_conv4_q, biases['conv4'], 1, 1, group=2)
k = qbits[4]
weights_conv5_q = quantize_weights(weights['conv5'], k)
conv5 = conv(conv4, weights_conv5_q, biases['conv5'], 1, 1, group=2)
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')
k = qbits[5]
weights_fc6_q = quantize_weights(weights['fc6'], k)
fc6 = fc(pool5, weights_fc6_q, biases['fc6'])
k = qbits[6]
weights_fc7_q = quantize_weights(weights['fc7'], k)
fc7 = fc(fc6, weights_fc7_q, biases['fc7'])
fc8 = fc(fc7, weights['fc8'], biases['fc8'], relu=False)
return fc8
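# Hedged usage sketch for alexnet_q (argument names are illustrative):
#   qbits = [16, 8, 8, 4, 8, 8, 8, 16]   # conv1..conv5, fc6, fc7, fc8
#   logits = alexnet_q(images, netparams, qbits)
# Note that this function only applies qbits[1]..qbits[6] (conv2..fc7);
# conv1 and fc8 are left in full precision.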
def alexnet_q_1(input_node, netparams):
# qbits = [16, 8, 8, 4, 8, 8, 8, 16]
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 4, 4, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
k = 8
weights_conv2_q = quantize_weights(weights['conv2'], k)
conv2 = conv(pool1, weights_conv2_q, biases['conv2'], 1, 1, group=2)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
k = 8
weights_conv3_q = quantize_weights(weights['conv3'], k)
conv3 = conv(pool2, weights_conv3_q, biases['conv3'], 1, 1)
#k = 4
#weights_conv4_q = quantize_weights(weights['conv4'], k)
conv4 = conv(conv3, weights['conv4'], biases['conv4'], 1, 1, group=2)
k = 8
weights_conv5_q = quantize_weights(weights['conv5'], k)
conv5 = conv(conv4, weights_conv5_q, biases['conv5'], 1, 1, group=2)
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')
k = 8
weights_fc6_q = quantize_weights(weights['fc6'], k)
fc6 = fc(pool5, weights_fc6_q, biases['fc6'])
k = 8
weights_fc7_q = quantize_weights(weights['fc7'], k)
fc7 = fc(fc6, weights_fc7_q, biases['fc7'])
fc8 = fc(fc7, weights['fc8'], biases['fc8'], relu=False)
return fc8, conv4
def alexnet_q_sin2(input_node, netparams, qbits):
# qbits = [16, 8, 8, 4, 8, 8, 8, 16]
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 4, 4, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
k = qbits[1]
weights_conv2_q = quantize_weights(weights['conv2'], k)
conv2 = conv(pool1, weights['conv2'], biases['conv2'], 1, 1, group=2)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
k = qbits[2]
weights_conv3_q = quantize_weights(weights['conv3'], k)
conv3 = conv(pool2, weights['conv3'], biases['conv3'], 1, 1)
k = qbits[3]
weights_conv4_q = quantize_weights(weights['conv4'], k)
q_diff = tf.subtract(weights_conv4_q, weights['conv4'])
#q_diff_cost = tf.nn.l2_loss(q_diff)
# Note: q_diff_cost is computed here but not returned by alexnet_q_sin2.
q_diff_cost = tf.reduce_mean(q_diff)
conv4 = conv(conv3, weights['conv4'], biases['conv4'], 1, 1, group=2)
k = qbits[4]
weights_conv5_q = quantize_weights(weights['conv5'], k)
conv5 = conv(conv4, weights['conv5'], biases['conv5'], 1, 1, group=2)
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')
k = qbits[5]
weights_fc6_q = quantize_weights(weights['fc6'], k)
fc6 = fc(pool5, weights['fc6'], biases['fc6'])
k = qbits[6]
weights_fc7_q = quantize_weights(weights['fc7'], k)
fc7 = fc(fc6, weights['fc7'], biases['fc7'])
fc8 = fc(fc7, weights['fc8'], biases['fc8'], relu=False)
return fc8, weights['conv4']
def alexnet_q_RL_v0(input_node, netparams, qbits, layer_idx):
# qbits = [16, 8, 8, 4, 8, 8, 8, 16]
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
""" first layer is kept in full precision """
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 4, 4, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
k = qbits[1]
if layer_idx == 1:
weights_conv2_q = weights['conv2']
else:
weights_conv2_q = quantize_weights(weights['conv2'], k)
conv2 = conv(pool1, weights_conv2_q, biases['conv2'], 1, 1, group=2)
norm2 = lrn(conv2, 2, 1.99999994948e-05, 0.75)
pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')
k = qbits[2]
if layer_idx == 2:
weights_conv3_q = weights['conv3']
else:
weights_conv3_q = quantize_weights(weights['conv3'], k)
conv3 = conv(pool2, weights_conv3_q, biases['conv3'], 1, 1)
k = qbits[3]
if layer_idx == 3:
weights_conv4_q = weights['conv4']
else:
weights_conv4_q = quantize_weights(weights['conv4'], k)
conv4 = conv(conv3, weights_conv4_q, biases['conv4'], 1, 1, group=2)
k = qbits[4]
if layer_idx == 4:
weights_conv5_q = weights['conv5']
else:
weights_conv5_q = quantize_weights(weights['conv5'], k)
conv5 = conv(conv4, weights_conv5_q, biases['conv5'], 1, 1, group=2)
pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')
k = qbits[5]
if layer_idx == 5:
weights_fc6_q = weights['fc6']
else:
weights_fc6_q = quantize_weights(weights['fc6'], k)
fc6 = fc(pool5, weights_fc6_q, biases['fc6'])
k = qbits[6]
if layer_idx == 6:
weights_fc7_q = weights['fc7']
else:
weights_fc7_q = quantize_weights(weights['fc7'], k)
fc7 = fc(fc6, weights_fc7_q, biases['fc7'])
""" last layer is kept in full precision """
fc8 = fc(fc7, weights['fc8'], biases['fc8'], relu=False)
return fc8, conv4
def alexnet_q_RL(input_node, netparams, qbits, layer_idx):
# qbits = [16, 8, 8, 4, 8, 8, 8, 16]
num_layers = 8
layer_quantize = [0] * num_layers
layer_quantize[0:layer_idx] = [1] * (layer_idx)
weights, biases = netparams['weights'], netparams['biases']
data_spec = get_data_spec('alexnet')
""" first layer is kept in full precision """
conv1 = conv(input_node, weights['conv1'], biases['conv1'], 4, 4, padding='VALID')
norm1 = lrn(conv1, 2, 1.99999994948e-05, 0.75)
pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')
k = qbits[1]
if layer_quantize[1]
from asyncio import get_event_loop, InvalidStateError
import time
import pytest
from aiorpcx.curio import *
def sum_all(*values):
return sum(values)
async def my_raises(exc):
raise exc
async def return_value(x, secs=0):
if secs:
await sleep(secs)
return x
# Test exports
sleep
CancelledError
Event
Lock
Queue
Semaphore
@pytest.mark.asyncio
async def test_run_in_thread():
assert await run_in_thread(sum_all) == 0
assert await run_in_thread(sum_all, 1) == 1
assert await run_in_thread(sum_all, 1, 2, 3) == 6
@pytest.mark.asyncio
async def test_next_done():
t = TaskGroup()
assert t.completed is None
assert await t.next_done() is None
assert await t.next_done() is None
tasks = ()
t = TaskGroup(tasks)
assert t.completed is None
assert await t.next_done() is None
tasks = (await spawn(sleep, 0.01), await spawn(sleep, 0.02))
t = TaskGroup(tasks)
assert (await t.next_done(), await t.next_done()) == tasks
assert t.completed is tasks[0]
assert await t.next_done() is None
tasks = (await spawn(sleep, 0), await spawn(sleep, 0.01))
tasks[0].cancel()
await sleep(0)
t = TaskGroup(tasks)
assert (await t.next_done(), await t.next_done()) == tasks
assert await t.next_done() is None
tasks = (await spawn(sleep(0.002)), await spawn(sleep, 0.001))
t = TaskGroup(tasks)
assert await t.next_done() == tasks[1]
assert await t.next_done() == tasks[0]
assert await t.next_done() is None
assert t.completed is tasks[1]
tasks = (await spawn(sleep, 0.02), await spawn(sleep, 0.01))
for task in tasks:
task.cancel()
t = TaskGroup(tasks)
assert await t.next_done() == tasks[0]
assert await t.next_done() == tasks[1]
assert await t.next_done() is None
@pytest.mark.asyncio
async def test_next_result():
t = TaskGroup()
with pytest.raises(RuntimeError):
await t.next_result()
tasks = ()
t = TaskGroup(tasks)
with pytest.raises(RuntimeError):
await t.next_result()
tasks = (await spawn(return_value(1)), await spawn(return_value(2)))
t = TaskGroup(tasks)
assert (await t.next_result(), await t.next_result()) == (1, 2)
with pytest.raises(RuntimeError):
await t.next_result()
@pytest.mark.asyncio
async def test_tg_spawn():
t = TaskGroup()
task = await t.spawn(sleep, 0.01)
assert await t.next_done() == task
assert await t.next_done() is None
task = await t.spawn(sleep(0.01))
assert await t.next_done() == task
@pytest.mark.asyncio
async def test_tg_cancel_remaining():
tasks = [await spawn(sleep, x/200) for x in range(1, 4)]
t = TaskGroup(tasks)
assert await t.next_done()
await t.cancel_remaining()
assert not tasks[0].cancelled()
assert all(task.cancelled() for task in tasks[1:])
@pytest.mark.asyncio
async def test_tg_aiter():
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
t = TaskGroup(tasks)
result = [task async for task in t]
assert result == list(reversed(tasks))
@pytest.mark.asyncio
async def test_tg_join_no_arg():
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
t = TaskGroup(tasks)
await t.join()
assert all(task.done() for task in tasks)
assert not any(task.cancelled() for task in tasks)
@pytest.mark.asyncio
async def test_tg_cm_no_arg():
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
async with TaskGroup(tasks) as t:
pass
assert all(task.done() for task in tasks)
assert not any(task.cancelled() for task in tasks)
assert t.completed is tasks[-1]
@pytest.mark.asyncio
async def test_tg_cm_all():
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
async with TaskGroup(tasks, wait=all) as t:
pass
assert all(task.done() for task in tasks)
assert not any(task.cancelled() for task in tasks)
assert t.completed is tasks[-1]
@pytest.mark.asyncio
async def test_tg_cm_any():
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
async with TaskGroup(tasks, wait=any) as t:
pass
assert all(task.done() for task in tasks)
assert not tasks[-1].cancelled()
assert all(task.cancelled() for task in tasks[:-1])
assert t.completed is tasks[-1]
@pytest.mark.asyncio
async def test_tg_join_object():
tasks = [await spawn(return_value(None, 0.01)),
await spawn(return_value(3, 0.02))]
t = TaskGroup(tasks, wait=object)
await t.join()
assert tasks[0].result() == None
assert tasks[1].result() == 3
assert t.completed is tasks[1]
tasks = [await spawn(return_value(None, 0.01)),
await spawn(return_value(4, 0.02)),
await spawn(return_value(2, 0.03))]
t = TaskGroup(tasks, wait=object)
await t.join()
assert tasks[0].result() == None
assert tasks[1].result() == 4
assert tasks[2].cancelled()
assert t.completed is tasks[1]
@pytest.mark.asyncio
async def test_tg_cm_object():
tasks = [await spawn(return_value(None, 0.01)),
await spawn(return_value(3, 0.02))]
async with TaskGroup(tasks, wait=object) as t:
pass
assert tasks[0].result() == None
assert tasks[1].result() == 3
assert t.completed is tasks[1]
tasks = [await spawn(return_value(None, 0.01)),
await spawn(return_value(4, 0.02)),
await spawn(return_value(2, 0.03))]
async with TaskGroup(tasks, wait=object) as t:
pass
assert tasks[0].result() == None
assert tasks[1].result() == 4
assert tasks[2].cancelled()
assert t.completed is tasks[1]
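# Informal summary of the wait=object behaviour exercised above: the group
# completes as soon as a task produces a non-None result, earlier None results
# are kept, any still-pending tasks are cancelled, and t.completed points at
# the task that produced the non-None value.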
@pytest.mark.asyncio
async def test_tg_join_errored():
for wait in (all, any, object):
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
t = TaskGroup(tasks, wait=wait)
bad_task = await t.spawn(my_raises(ArithmeticError))
with pytest.raises(ArithmeticError):
await t.join()
assert all(task.cancelled() for task in tasks)
assert bad_task.done() and not bad_task.cancelled()
assert t.completed is None
@pytest.mark.asyncio
async def test_tg_cm_errored():
for wait in (all, any, object):
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
with pytest.raises(EOFError):
async with TaskGroup(tasks, wait=wait) as t:
bad_task = await t.spawn(my_raises(EOFError))
assert all(task.cancelled() for task in tasks)
assert bad_task.done() and not bad_task.cancelled()
assert t.completed is None
@pytest.mark.asyncio
async def test_tg_join_errored_past():
for wait in (all, any, object):
tasks = [await spawn(my_raises, AttributeError) for n in range(3)]
t = TaskGroup(tasks, wait=wait)
tasks[1].cancel()
await sleep(0.001)
good_task = await t.spawn(return_value(3, 0.001))
with pytest.raises(AttributeError):
await t.join()
assert good_task.cancelled()
assert t.completed is None
@pytest.mark.asyncio
async def test_cm_join_errored_past():
for wait in (all, any, object):
tasks = [await spawn(my_raises, BufferError) for n in range(3)]
with pytest.raises(BufferError):
async with TaskGroup(tasks, wait=wait) as t:
tasks[1].cancel()
await sleep(0.001)
good_task = await t.spawn(return_value(3, 0.001))
assert good_task.cancelled()
assert t.completed is None
@pytest.mark.asyncio
async def test_cm_raises():
tasks = [await spawn(sleep, 0.01) for n in range(3)]
with pytest.raises(ValueError) as e:
async with TaskGroup(tasks) as t:
raise ValueError
assert all(task.cancelled() for task in tasks)
@pytest.mark.asyncio
async def test_cm_add_later():
tasks = [await spawn(sleep, 0) for n in range(3)]
with pytest.raises(LookupError):
async with TaskGroup(tasks) as t:
await sleep(0.001)
task = await t.spawn(my_raises, LookupError)
assert all(task.result() is None for task in tasks)
assert t.completed in tasks
@pytest.mark.asyncio
async def test_tg_multiple_groups():
task = await spawn(my_raises, FloatingPointError)
t1 = TaskGroup([task])
with pytest.raises(RuntimeError):
TaskGroup([task])
t3 = TaskGroup()
with pytest.raises(RuntimeError):
await t3.add_task(task)
with pytest.raises(FloatingPointError):
await task
@pytest.mark.asyncio
async def test_tg_closed():
task = await spawn(return_value(3))
for wait in (all, any, object):
t = TaskGroup()
assert not t.closed()
await t.join()
assert t.closed()
with pytest.raises(RuntimeError):
await t.spawn(my_raises, ImportError)
with pytest.raises(RuntimeError):
await t.add_task(task)
await task
@pytest.mark.asyncio
async def test_tg_wait_bad():
tasks = [await spawn(sleep, x/200) for x in range(5, 0, -1)]
with pytest.raises(ValueError):
TaskGroup(tasks, wait=None)
assert not any(task.cancelled() for task in tasks)
for task in tasks:
await task
class MyLogger(object):
def __init__(self):
self.logged = False
def error(self, msg, *args, **kwargs):
self.logged = True
@pytest.mark.asyncio
async def test_logging(caplog):
for report_crash in (True, False):
# spawn
task = await spawn(my_raises(TypeError), report_crash=report_crash)
try:
await task
assert False
except TypeError:
pass
assert any('TypeError' in record.message for record in caplog.records)
async def return_after_sleep(x, period=0.01):
await sleep(period)
return x
@pytest.mark.asyncio
async def test_timeout_after_coro_callstyles():
async def t1(*values):
return 1 + sum(values)
assert await timeout_after(0.01, t1) == 1
assert await timeout_after(0.01, t1()) == 1
assert await timeout_after(0.01, t1(2, 8)) == 11
assert await timeout_after(0.01, t1, 2, 8) == 11
coro = t1()
with pytest.raises(ValueError):
await timeout_after(0, coro, 1)
await coro
@pytest.mark.asyncio
async def test_timeout_after_zero():
async def t1(*values):
return 1 + sum(values)
assert await timeout_after(0, t1) == 1
assert await timeout_after(0, t1, 2) == 3
assert await timeout_after(0, t1, 2, 8) == 11
@pytest.mark.asyncio
async def test_timeout_after_no_expire():
async def t1(*values):
return await return_after_sleep(1 + sum(values), 0.01)
try:
assert await timeout_after(0.02, t1, 1) == 2
except TaskTimeout:
assert False
await sleep(0.02)
assert True
@pytest.mark.asyncio
async def test_nested_after_no_expire_nested():
async def coro1():
pass
async def child():
await timeout_after(0.001, coro1())
async def parent():
await timeout_after(0.003, child())
await parent()
try:
await sleep(0.005)
except CancelledError:
assert False
@pytest.mark.asyncio
async def test_nested_after_no_expire_nested2():
async def coro1():
pass
async def child():
await timeout_after(0.001, coro1())
await sleep(0.005)
async def parent():
try:
await timeout_after(0.003, child())
except TaskTimeout:
return
assert False
await parent()
@pytest.mark.asyncio
async def test_timeout_after_raises_IndexError():
try:
await timeout_after(0.01, my_raises, IndexError)
except IndexError:
return
assert False
@pytest.mark.asyncio
async def test_timeout_after_raises_CancelledError():
try:
await timeout_after(0.01, my_raises, CancelledError)
except CancelledError:
return
assert False
@pytest.mark.asyncio
async def test_nested_timeout():
results = []
async def coro1():
results.append('coro1 start')
await sleep(1)
results.append('coro1 done')
async def coro2():
results.append('coro2 start')
await sleep(1)
results.append('coro2 done')
# Parent should cause a timeout before the child.
# Results in a TimeoutCancellationError instead of a normal TaskTimeout
async def child():
try:
await timeout_after(0.05, coro1())
results.append('coro1 success')
except TaskTimeout:
results.append('coro1 timeout')
except TimeoutCancellationError:
results.append('coro1 timeout cancel')
await coro2()
results.append('coro2 success')
async def parent():
try:
await timeout_after(0.01, child())
except TaskTimeout:
results.append('parent timeout')
await parent()
assert results == [
'coro1 start',
'coro1 timeout cancel',
'coro2 start',
'parent timeout'
]
@pytest.mark.asyncio
async def test_nested_context_timeout():
results = []
async def coro1():
results.append('coro1 start')
await sleep(1)
results.append('coro1 done')
async def coro2():
results.append('coro2 start')
await sleep(1)
results.append('coro2 done')
# Parent should cause a timeout before the child.
# Results in a TimeoutCancellationError instead of a normal TaskTimeout
async def child():
try:
async with
#copyright ReportLab Inc. 2000-2019
#see license.txt for license details
"""preppy - a Python preprocessor.
This is the Python equivalent of ASP or JSP - a preprocessor which lets you
embed python expressions, loops and conditionals, and 'scriptlets' in any
kind of text file. It provides a very natural solution for generating
dynamic HTML pages, which is not connected to any particular web server
architecture.
You create a template file (conventionally ending in .prep) containing
python expressions, loops and conditionals, and scripts. These occur
between double curly braces:
Dear {{surname}},
You owe us {{amount}} {{if amount>1000}}which is pretty serious{{endif}}
On first use or any change in the template, this is normally converted to a
python source module 'in memory', then to a compiled pyc file which is saved to
disk alongside the original. Options control this; you can operate entirely
in memory, or look at the generated python code if you wish.
On subsequent use, the generated module is imported and loaded directly.
The module contains a run(...) function; you can pass in a dictionary of
parameters (such as the surname and amount parameters above), and optionally
an output stream or output-collection function if you don't want it to go to
standard output.
The command line options let you run modules with hand-input parameters -
useful for basic testing - and also to batch-compile or clean directories.
As with python scripts, it is a good idea to compile prep files on installation,
since unix applications may run as a different user and not have the needed
permission to store compiled modules.
"""
VERSION = '4.0.2'
__version__ = VERSION
USAGE = """
The command line interface lets you test, compile and clean up:
preppy modulename [arg1=value1, arg2=value2.....]
- shorthand for 'preppy run ...', see below.
preppy run modulename [arg1=value1, arg2=value2.....]
- runs the module, optionally with arguments. e.g.
preppy.py flintstone.prep name=fred sex=m
preppy.py compile [-f] [-v] [-p] module1[.prep] module2[.prep] module3 ...
- compiles explicit modules
preppy.py compile [-f] [-v] [-p] dirname1 dirname2 ...
- compiles all prep files in directory recursively
preppy.py clean dirname1 dirname2 ...
- removes any py or pyc files created from past compilations
"""
STARTDELIMITER = "{{"
ENDDELIMITER = "}}"
QSTARTDELIMITER = "{${"
QENDDELIMITER = "}$}"
QUOTE = "$"
QUOTEQUOTE = "$$"
# SEQUENCE OF REPLACEMENTS FOR UNESCAPING A STRING.
UNESCAPES = ((QSTARTDELIMITER, STARTDELIMITER), (QENDDELIMITER, ENDDELIMITER), (QUOTEQUOTE, QUOTE))
import re, sys, os, struct, tokenize, token, ast, traceback, time, marshal, pickle, inspect, textwrap
from hashlib import md5
isPy3 = sys.version_info.major == 3
isPy33 = isPy3 and sys.version_info.minor>=3
isPy34 = isPy33 and sys.version_info.minor>=4
isPy37 = isPy33 and sys.version_info.minor>=7
isPy38 = isPy33 and sys.version_info.minor>=8
isPy39 = isPy33 and sys.version_info.minor>=9
isPy310 = isPy33 and sys.version_info.minor>=10
_usePyCache = isPy3 and False  #change to True if you have no legacy (i.e. Python 2.7) usage
from xml.sax.saxutils import escape as xmlEscape
from collections import namedtuple
Token = namedtuple('Token','kind start end')
_verbose = int(os.environ.get('RL_verbose','0'))
from keyword import iskeyword
if isPy3:
xrange = range
from io import BytesIO, StringIO
def __preppy__vlhs__(s):
try:
s = s.strip()
return s.isidentifier() and not iskeyword(s)
except:
return False
class SafeString(bytes):
'''either a SafeString or a SafeUnicode depending on argument type'''
def __new__(cls,v):
return str.__new__(SafeUnicode,v) if isinstance(v,str) else bytes.__new__(cls,v)
class SafeUnicode(str):
'''either a SafeString or a SafeUnicode depending on argument type'''
def __new__(cls,v):
return bytes.__new__(SafeString,v) if isinstance(v,bytes) else str.__new__(cls,v)
_ucvn = '__str__' #unicode conversion
_bcvn = '__bytes__' #bytes conversion
bytesT = bytes
unicodeT = str
strTypes = (str,bytes)
import builtins
rl_exec = getattr(builtins,'exec')
del builtins
else:
from StringIO import StringIO
BytesIO = StringIO
try:
isidentifier = tokenize.Name
except AttributeError:
isidentifier = '[a-zA-Z_][a-zA-Z0-9_]*'
isidentifier = re.compile('^%s$' % isidentifier).match
def __preppy__vlhs__(s):
try:
s = s.strip()
return s!='None' and isidentifier(s) and not iskeyword(s)
except:
return False
class SafeString(str):
'''either a SafeString or a SafeUnicode depending on argument type'''
def __new__(cls,v):
return unicode.__new__(SafeUnicode,v) if isinstance(v,unicode) else str.__new__(cls,v)
class SafeUnicode(unicode):
'''either a SafeString or a SafeUnicode depending on argument type'''
def __new__(cls,v):
return str.__new__(SafeString,v) if isinstance(v,str) else unicode.__new__(cls,v)
_ucvn = '__unicode__'
_bcvn = '__str__'
bytesT = str
unicodeT = unicode
strTypes = basestring
def rl_exec(obj, G=None, L=None):
if G is None:
frame = sys._getframe(1)
G = frame.f_globals
if L is None:
L = frame.f_locals
del frame
elif L is None:
L = G
exec("""exec obj in G, L""")
class AstTry:
_attributes = ('lineno','col_offset')
_fields = ('body','handlers','orelse','finalbody')
def __init__(self,**kwds):
self.lineno = 1
self.col_offset = 0
self.__dict__.update(kwds)
def convertTry(self):
if not self.handlers:
return ast.TryFinally(lineno=self.lineno,col_offset=self.col_offset,body=self.body,finalbody=self.finalbody)
elif not self.finalbody:
return ast.TryExcept(lineno=self.lineno,col_offset=self.col_offset,body=self.body,handlers=self.handlers,orelse=self.orelse)
else:
return ast.TryFinally(lineno=self.lineno,col_offset=self.col_offset,
body=[ast.TryExcept(lineno=self.lineno,col_offset=self.col_offset,body=self.body,handlers=self.handlers,orelse=self.orelse)],
finalbody=self.finalbody)
ast.Try = AstTry
defaultLConv = ['unicode','str']
def asUtf8(s):
return s if isinstance(s,bytesT) else s.encode('utf8')
def asUnicode(s):
return s if isinstance(s,unicodeT) else s.decode('utf8')
def getMd5(s):
return md5(asUtf8(s)+asUtf8(VERSION)).hexdigest()
class AbsLineNo(int):
pass
def uStdConv(s):
if not isinstance(s,strTypes):
if s is None: return u'' #we usually don't want output
cnv = getattr(s,_ucvn,None)
if not cnv:
cnv = getattr(s,_bcvn,None)
s = cnv() if cnv else str(s)
if not isinstance(s,unicodeT):
s = s.decode('utf8')
return s
def bStdConv(s):
return uStdConv(s).encode('utf8')
def __get_conv__(qf,lqf,b):
'''return the quoteFunc, lquoteFunc given values for same and
whether the original was bytes'''
if qf and not lqf:
lqf = asUtf8 if isinstance(qf(''),bytesT) else asUnicode
elif lqf and not qf:
qf = bStdConv if isinstance(lqf(''),bytesT) else uStdConv
elif not qf and not lqf:
if b:
qf = bStdConv
lqf = bytesT
else:
qf = uStdConv
lqf = unicodeT
return qf, lqf
class __wsscontroller__:
class ignore(str):
pass
wsc = u''.join((
#u'\u000A', # LINE FEED
u'\u0009', # HORIZONTAL TABULATION
u'\u000B', # VERTICAL TABULATION
u'\u000C', # FORM FEED
u'\u000D', # CARRIAGE RETURN
u'\u001C', # FILE SEPARATOR
u'\u001D', # GROUP SEPARATOR
u'\u001E', # RECORD SEPARATOR
u'\u001F', # UNIT SEPARATOR
u'\u0020', # SPACE
u'\u0085', # NEXT LINE
u'\u00A0', # NO-BREAK SPACE
u'\u1680', # OGHAM SPACE MARK
u'\u2000', # EN QUAD
u'\u2001', # EM QUAD
u'\u2002', # EN SPACE
u'\u2003', # EM SPACE
u'\u2004', # THREE-PER-EM SPACE
u'\u2005', # FOUR-PER-EM SPACE
u'\u2006', # SIX-PER-EM SPACE
u'\u2007', # FIGURE SPACE
u'\u2008', # PUNCTUATION SPACE
u'\u2009', # THIN SPACE
u'\u200A', # HAIR SPACE
u'\u200B', # ZERO WIDTH SPACE
u'\u2028', # LINE SEPARATOR
u'\u2029', # PARAGRAPH SEPARATOR
u'\u202F', # NARROW NO-BREAK SPACE
u'\u205F', # MEDIUM MATHEMATICAL SPACE
u'\u3000', # IDEOGRAPHIC SPACE
))
pats = {
1: re.compile(u'^[%s]*' % wsc),
2: re.compile(u'^[%s]*' % (wsc+u'\u000A')),
}
def __init__(self):
self.ws = 0
def dnl(self):
self.ws = 1 #delete following white space to next line
return self.ignore('')
def dws(self):
self.ws = 2 #delete following white space
return self.ignore('')
def x(self,s):
if not isinstance(s,self.ignore):
self.ws = 0
return s
def c(self,s):
ws = self.ws
self.ws = 0
if ws:
b = isinstance(s,bytesT)
if b: s = asUnicode(s)
s = self.pats[ws].sub('',s)
if ws==1 and s[0]==u'\n': s = s[1:]
if b: s = asUtf8(s)
return s
#Andy's standard quote for django
_safeBase = SafeString, SafeUnicode
def uStdQuote(s):
if not isinstance(s,strTypes):
if s is None: return u'' #we usually don't want output
cnv = getattr(s,_ucvn,None)
if not cnv:
cnv = getattr(s,_bcvn,None)
s = cnv() if cnv else unicodeT(s)
if isinstance(s,_safeBase):
if isinstance(s,SafeString):
s = s.decode('utf8')
return s
elif not isinstance(s,unicodeT):
s = s.decode('utf8')
return xmlEscape(s)
def bStdQuote(s):
return uStdQuote(s).encode('utf8')
stdQuote = bStdQuote
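# Illustrative behaviour of the quote helpers above (a sketch, not an exhaustive spec):
# plain strings are XML-escaped, Safe* wrappers pass through untouched.
#   uStdQuote("<b>")               -> "&lt;b&gt;"
#   uStdQuote(SafeUnicode("<b>"))  -> "<b>"
#   bStdQuote("<b>")               -> b"&lt;b&gt;"   (same result, encoded as UTF-8 bytes)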
def pnl(s):
'''print without a lineend'''
if not isPy3 and isinstance(s,unicodeT):
s = s.encode(sys.stdout.encoding,'replace')
sys.stdout.write(s)
def pel(s):
'''print with a line ending'''
pnl(s)
pnl('\n')
def unescape(s, unescapes=UNESCAPES):
for (old, new) in unescapes:
s = s.replace(old, new)
return s
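# Example: unescape() turns quoted delimiters back into their literal forms,
# e.g. unescape("{${surname}$}") == "{{surname}}" and unescape("$$") == "$".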
teststring = """
this test script should produce a runnable program
{{script}}
class X:
pass
x = X()
x.a = "THE A VALUE OF X"
yislonger = "y is longer!"
import math
a = dictionary = {"key": "value", "key2": "value2", "10%": "TEN PERCENT"}
loop = "LOOP"
{{endscript}}
this line has a percent in it 10%
here is the a value in x: {{x.a}}
just a normal value here: {{yislonger}} string {{a["10%"]}}
the sine of 12.3 is {{math.sin(12.3)}}
{{script}} a=0 {{endscript}}
these parens should be empty
({{if a:}}
conditional text{{endif}})
{{script}} a=1
{{endscript}}
these parens should be full
({{if a:}}
conditional text{{endif}})
stuff between endif and while
{{while a==1:}} infinite {{loop}} forever!
{{script}} a=0 {{endscript}}
{{for (a,b) in dictionary.items():}}
the key in the dictionary is {{a}} and the value is {{b}}. And below is a script
{{script}}
# THIS IS A SCRIPT
x = 2
y = 3
# END OF THE SCRIPT
{{endscript}}
stuff after the script
{{endfor}}
stuff after the for stmt
{{endwhile}}
stuff after the while stmt
{{script}}
# test the free variables syntax error problem is gone
alpha = 3
def
_cv.CvConvexityDefect_end_set)
__swig_setmethods__["depth_point"] = _cv.CvConvexityDefect_depth_point_set
__swig_getmethods__["depth_point"] = _cv.CvConvexityDefect_depth_point_get
if _newclass:depth_point = _swig_property(_cv.CvConvexityDefect_depth_point_get, _cv.CvConvexityDefect_depth_point_set)
__swig_setmethods__["depth"] = _cv.CvConvexityDefect_depth_set
__swig_getmethods__["depth"] = _cv.CvConvexityDefect_depth_get
if _newclass:depth = _swig_property(_cv.CvConvexityDefect_depth_get, _cv.CvConvexityDefect_depth_set)
def __init__(self, *args):
"""__init__(self) -> CvConvexityDefect"""
this = _cv.new_CvConvexityDefect(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvConvexityDefect
__del__ = lambda self : None;
CvConvexityDefect_swigregister = _cv.CvConvexityDefect_swigregister
CvConvexityDefect_swigregister(CvConvexityDefect)
class CvQuadEdge2D(_object):
"""Proxy of C++ CvQuadEdge2D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvQuadEdge2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvQuadEdge2D, name)
__repr__ = _swig_repr
__swig_setmethods__["flags"] = _cv.CvQuadEdge2D_flags_set
__swig_getmethods__["flags"] = _cv.CvQuadEdge2D_flags_get
if _newclass:flags = _swig_property(_cv.CvQuadEdge2D_flags_get, _cv.CvQuadEdge2D_flags_set)
__swig_setmethods__["pt"] = _cv.CvQuadEdge2D_pt_set
__swig_getmethods__["pt"] = _cv.CvQuadEdge2D_pt_get
if _newclass:pt = _swig_property(_cv.CvQuadEdge2D_pt_get, _cv.CvQuadEdge2D_pt_set)
__swig_setmethods__["next"] = _cv.CvQuadEdge2D_next_set
__swig_getmethods__["next"] = _cv.CvQuadEdge2D_next_get
if _newclass:next = _swig_property(_cv.CvQuadEdge2D_next_get, _cv.CvQuadEdge2D_next_set)
def __init__(self, *args):
"""__init__(self) -> CvQuadEdge2D"""
this = _cv.new_CvQuadEdge2D(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvQuadEdge2D
__del__ = lambda self : None;
CvQuadEdge2D_swigregister = _cv.CvQuadEdge2D_swigregister
CvQuadEdge2D_swigregister(CvQuadEdge2D)
class CvSubdiv2DPoint(_object):
"""Proxy of C++ CvSubdiv2DPoint class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvSubdiv2DPoint, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvSubdiv2DPoint, name)
__repr__ = _swig_repr
__swig_setmethods__["flags"] = _cv.CvSubdiv2DPoint_flags_set
__swig_getmethods__["flags"] = _cv.CvSubdiv2DPoint_flags_get
if _newclass:flags = _swig_property(_cv.CvSubdiv2DPoint_flags_get, _cv.CvSubdiv2DPoint_flags_set)
__swig_setmethods__["first"] = _cv.CvSubdiv2DPoint_first_set
__swig_getmethods__["first"] = _cv.CvSubdiv2DPoint_first_get
if _newclass:first = _swig_property(_cv.CvSubdiv2DPoint_first_get, _cv.CvSubdiv2DPoint_first_set)
__swig_setmethods__["pt"] = _cv.CvSubdiv2DPoint_pt_set
__swig_getmethods__["pt"] = _cv.CvSubdiv2DPoint_pt_get
if _newclass:pt = _swig_property(_cv.CvSubdiv2DPoint_pt_get, _cv.CvSubdiv2DPoint_pt_set)
def __init__(self, *args):
"""__init__(self) -> CvSubdiv2DPoint"""
this = _cv.new_CvSubdiv2DPoint(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvSubdiv2DPoint
__del__ = lambda self : None;
CvSubdiv2DPoint_swigregister = _cv.CvSubdiv2DPoint_swigregister
CvSubdiv2DPoint_swigregister(CvSubdiv2DPoint)
class CvSubdiv2D(_object):
"""Proxy of C++ CvSubdiv2D class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvSubdiv2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvSubdiv2D, name)
__repr__ = _swig_repr
__swig_setmethods__["flags"] = _cv.CvSubdiv2D_flags_set
__swig_getmethods__["flags"] = _cv.CvSubdiv2D_flags_get
if _newclass:flags = _swig_property(_cv.CvSubdiv2D_flags_get, _cv.CvSubdiv2D_flags_set)
__swig_setmethods__["header_size"] = _cv.CvSubdiv2D_header_size_set
__swig_getmethods__["header_size"] = _cv.CvSubdiv2D_header_size_get
if _newclass:header_size = _swig_property(_cv.CvSubdiv2D_header_size_get, _cv.CvSubdiv2D_header_size_set)
__swig_setmethods__["h_prev"] = _cv.CvSubdiv2D_h_prev_set
__swig_getmethods__["h_prev"] = _cv.CvSubdiv2D_h_prev_get
if _newclass:h_prev = _swig_property(_cv.CvSubdiv2D_h_prev_get, _cv.CvSubdiv2D_h_prev_set)
__swig_setmethods__["h_next"] = _cv.CvSubdiv2D_h_next_set
__swig_getmethods__["h_next"] = _cv.CvSubdiv2D_h_next_get
if _newclass:h_next = _swig_property(_cv.CvSubdiv2D_h_next_get, _cv.CvSubdiv2D_h_next_set)
__swig_setmethods__["v_prev"] = _cv.CvSubdiv2D_v_prev_set
__swig_getmethods__["v_prev"] = _cv.CvSubdiv2D_v_prev_get
if _newclass:v_prev = _swig_property(_cv.CvSubdiv2D_v_prev_get, _cv.CvSubdiv2D_v_prev_set)
__swig_setmethods__["v_next"] = _cv.CvSubdiv2D_v_next_set
__swig_getmethods__["v_next"] = _cv.CvSubdiv2D_v_next_get
if _newclass:v_next = _swig_property(_cv.CvSubdiv2D_v_next_get, _cv.CvSubdiv2D_v_next_set)
__swig_setmethods__["total"] = _cv.CvSubdiv2D_total_set
__swig_getmethods__["total"] = _cv.CvSubdiv2D_total_get
if _newclass:total = _swig_property(_cv.CvSubdiv2D_total_get, _cv.CvSubdiv2D_total_set)
__swig_setmethods__["elem_size"] = _cv.CvSubdiv2D_elem_size_set
__swig_getmethods__["elem_size"] = _cv.CvSubdiv2D_elem_size_get
if _newclass:elem_size = _swig_property(_cv.CvSubdiv2D_elem_size_get, _cv.CvSubdiv2D_elem_size_set)
__swig_setmethods__["block_max"] = _cv.CvSubdiv2D_block_max_set
__swig_getmethods__["block_max"] = _cv.CvSubdiv2D_block_max_get
if _newclass:block_max = _swig_property(_cv.CvSubdiv2D_block_max_get, _cv.CvSubdiv2D_block_max_set)
__swig_setmethods__["ptr"] = _cv.CvSubdiv2D_ptr_set
__swig_getmethods__["ptr"] = _cv.CvSubdiv2D_ptr_get
if _newclass:ptr = _swig_property(_cv.CvSubdiv2D_ptr_get, _cv.CvSubdiv2D_ptr_set)
__swig_setmethods__["delta_elems"] = _cv.CvSubdiv2D_delta_elems_set
__swig_getmethods__["delta_elems"] = _cv.CvSubdiv2D_delta_elems_get
if _newclass:delta_elems = _swig_property(_cv.CvSubdiv2D_delta_elems_get, _cv.CvSubdiv2D_delta_elems_set)
__swig_setmethods__["storage"] = _cv.CvSubdiv2D_storage_set
__swig_getmethods__["storage"] = _cv.CvSubdiv2D_storage_get
if _newclass:storage = _swig_property(_cv.CvSubdiv2D_storage_get, _cv.CvSubdiv2D_storage_set)
__swig_setmethods__["free_blocks"] = _cv.CvSubdiv2D_free_blocks_set
__swig_getmethods__["free_blocks"] = _cv.CvSubdiv2D_free_blocks_get
if _newclass:free_blocks = _swig_property(_cv.CvSubdiv2D_free_blocks_get, _cv.CvSubdiv2D_free_blocks_set)
__swig_setmethods__["first"] = _cv.CvSubdiv2D_first_set
__swig_getmethods__["first"] = _cv.CvSubdiv2D_first_get
if _newclass:first = _swig_property(_cv.CvSubdiv2D_first_get, _cv.CvSubdiv2D_first_set)
__swig_setmethods__["free_elems"] = _cv.CvSubdiv2D_free_elems_set
__swig_getmethods__["free_elems"] = _cv.CvSubdiv2D_free_elems_get
if _newclass:free_elems = _swig_property(_cv.CvSubdiv2D_free_elems_get, _cv.CvSubdiv2D_free_elems_set)
__swig_setmethods__["active_count"] = _cv.CvSubdiv2D_active_count_set
__swig_getmethods__["active_count"] = _cv.CvSubdiv2D_active_count_get
if _newclass:active_count = _swig_property(_cv.CvSubdiv2D_active_count_get, _cv.CvSubdiv2D_active_count_set)
__swig_setmethods__["quad_edges"] = _cv.CvSubdiv2D_quad_edges_set
__swig_getmethods__["quad_edges"] = _cv.CvSubdiv2D_quad_edges_get
if _newclass:quad_edges = _swig_property(_cv.CvSubdiv2D_quad_edges_get, _cv.CvSubdiv2D_quad_edges_set)
__swig_setmethods__["is_geometry_valid"] = _cv.CvSubdiv2D_is_geometry_valid_set
__swig_getmethods__["is_geometry_valid"] = _cv.CvSubdiv2D_is_geometry_valid_get
if _newclass:is_geometry_valid = _swig_property(_cv.CvSubdiv2D_is_geometry_valid_get, _cv.CvSubdiv2D_is_geometry_valid_set)
__swig_setmethods__["recent_edge"] = _cv.CvSubdiv2D_recent_edge_set
__swig_getmethods__["recent_edge"] = _cv.CvSubdiv2D_recent_edge_get
if _newclass:recent_edge = _swig_property(_cv.CvSubdiv2D_recent_edge_get, _cv.CvSubdiv2D_recent_edge_set)
__swig_setmethods__["topleft"] = _cv.CvSubdiv2D_topleft_set
__swig_getmethods__["topleft"] = _cv.CvSubdiv2D_topleft_get
if _newclass:topleft = _swig_property(_cv.CvSubdiv2D_topleft_get, _cv.CvSubdiv2D_topleft_set)
__swig_setmethods__["bottomright"] = _cv.CvSubdiv2D_bottomright_set
__swig_getmethods__["bottomright"] = _cv.CvSubdiv2D_bottomright_get
if _newclass:bottomright = _swig_property(_cv.CvSubdiv2D_bottomright_get, _cv.CvSubdiv2D_bottomright_set)
__swig_setmethods__["edges"] = _cv.CvSubdiv2D_edges_set
__swig_getmethods__["edges"] = _cv.CvSubdiv2D_edges_get
if _newclass:edges = _swig_property(_cv.CvSubdiv2D_edges_get, _cv.CvSubdiv2D_edges_set)
def typed_edges_get(*args):
"""typed_edges_get(self) -> CvSeq_CvQuadEdge2D"""
return _cv.CvSubdiv2D_typed_edges_get(*args)
def typed_edges_set(*args):
"""typed_edges_set(self, CvSeq_CvQuadEdge2D ?)"""
return _cv.CvSubdiv2D_typed_edges_set(*args)
def __iter__(self):
s = CvSeq_QuadEdge2D.cast(self)
for i in range(s.total):
yield s[i]
def __init__(self, *args):
"""__init__(self) -> CvSubdiv2D"""
this = _cv.new_CvSubdiv2D(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvSubdiv2D
__del__ = lambda self : None;
CvSubdiv2D_swigregister = _cv.CvSubdiv2D_swigregister
CvSubdiv2D_swigregister(CvSubdiv2D)
CV_PTLOC_ERROR = _cv.CV_PTLOC_ERROR
CV_PTLOC_OUTSIDE_RECT = _cv.CV_PTLOC_OUTSIDE_RECT
CV_PTLOC_INSIDE = _cv.CV_PTLOC_INSIDE
CV_PTLOC_VERTEX = _cv.CV_PTLOC_VERTEX
CV_PTLOC_ON_EDGE = _cv.CV_PTLOC_ON_EDGE
CV_NEXT_AROUND_ORG = _cv.CV_NEXT_AROUND_ORG
CV_NEXT_AROUND_DST = _cv.CV_NEXT_AROUND_DST
CV_PREV_AROUND_ORG = _cv.CV_PREV_AROUND_ORG
CV_PREV_AROUND_DST = _cv.CV_PREV_AROUND_DST
CV_NEXT_AROUND_LEFT = _cv.CV_NEXT_AROUND_LEFT
CV_NEXT_AROUND_RIGHT = _cv.CV_NEXT_AROUND_RIGHT
CV_PREV_AROUND_LEFT = _cv.CV_PREV_AROUND_LEFT
CV_PREV_AROUND_RIGHT = _cv.CV_PREV_AROUND_RIGHT
CV_GAUSSIAN_5x5 = _cv.CV_GAUSSIAN_5x5
class CvMatrix3(_object):
"""Proxy of C++ CvMatrix3 class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvMatrix3, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvMatrix3, name)
__repr__ = _swig_repr
__swig_setmethods__["m"] = _cv.CvMatrix3_m_set
__swig_getmethods__["m"] = _cv.CvMatrix3_m_get
if _newclass:m = _swig_property(_cv.CvMatrix3_m_get, _cv.CvMatrix3_m_set)
def __init__(self, *args):
"""__init__(self) -> CvMatrix3"""
this = _cv.new_CvMatrix3(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvMatrix3
__del__ = lambda self : None;
CvMatrix3_swigregister = _cv.CvMatrix3_swigregister
CvMatrix3_swigregister(CvMatrix3)
class CvConDensation(_object):
"""Proxy of C++ CvConDensation class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvConDensation, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvConDensation, name)
def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_setmethods__["MP"] = _cv.CvConDensation_MP_set
__swig_getmethods__["MP"] = _cv.CvConDensation_MP_get
if _newclass:MP = _swig_property(_cv.CvConDensation_MP_get, _cv.CvConDensation_MP_set)
__swig_setmethods__["DP"] = _cv.CvConDensation_DP_set
__swig_getmethods__["DP"] = _cv.CvConDensation_DP_get
if _newclass:DP = _swig_property(_cv.CvConDensation_DP_get, _cv.CvConDensation_DP_set)
__swig_setmethods__["DynamMatr"] = _cv.CvConDensation_DynamMatr_set
__swig_getmethods__["DynamMatr"] = _cv.CvConDensation_DynamMatr_get
if _newclass:DynamMatr = _swig_property(_cv.CvConDensation_DynamMatr_get, _cv.CvConDensation_DynamMatr_set)
__swig_setmethods__["State"] = _cv.CvConDensation_State_set
__swig_getmethods__["State"] = _cv.CvConDensation_State_get
if _newclass:State = _swig_property(_cv.CvConDensation_State_get, _cv.CvConDensation_State_set)
__swig_setmethods__["SamplesNum"] = _cv.CvConDensation_SamplesNum_set
__swig_getmethods__["SamplesNum"] = _cv.CvConDensation_SamplesNum_get
if _newclass:SamplesNum = _swig_property(_cv.CvConDensation_SamplesNum_get, _cv.CvConDensation_SamplesNum_set)
__swig_setmethods__["flSamples"] = _cv.CvConDensation_flSamples_set
__swig_getmethods__["flSamples"] = _cv.CvConDensation_flSamples_get
if _newclass:flSamples = _swig_property(_cv.CvConDensation_flSamples_get, _cv.CvConDensation_flSamples_set)
__swig_setmethods__["flNewSamples"] = _cv.CvConDensation_flNewSamples_set
__swig_getmethods__["flNewSamples"] = _cv.CvConDensation_flNewSamples_get
if _newclass:flNewSamples = _swig_property(_cv.CvConDensation_flNewSamples_get, _cv.CvConDensation_flNewSamples_set)
__swig_setmethods__["flConfidence"] = _cv.CvConDensation_flConfidence_set
__swig_getmethods__["flConfidence"] = _cv.CvConDensation_flConfidence_get
if _newclass:flConfidence = _swig_property(_cv.CvConDensation_flConfidence_get, _cv.CvConDensation_flConfidence_set)
__swig_setmethods__["flCumulative"] = _cv.CvConDensation_flCumulative_set
__swig_getmethods__["flCumulative"] = _cv.CvConDensation_flCumulative_get
if _newclass:flCumulative = _swig_property(_cv.CvConDensation_flCumulative_get, _cv.CvConDensation_flCumulative_set)
__swig_setmethods__["Temp"] = _cv.CvConDensation_Temp_set
__swig_getmethods__["Temp"] = _cv.CvConDensation_Temp_get
if _newclass:Temp = _swig_property(_cv.CvConDensation_Temp_get, _cv.CvConDensation_Temp_set)
__swig_setmethods__["RandomSample"] = _cv.CvConDensation_RandomSample_set
__swig_getmethods__["RandomSample"] = _cv.CvConDensation_RandomSample_get
if _newclass:RandomSample = _swig_property(_cv.CvConDensation_RandomSample_get, _cv.CvConDensation_RandomSample_set)
__swig_setmethods__["RandS"] = _cv.CvConDensation_RandS_set
__swig_getmethods__["RandS"] = _cv.CvConDensation_RandS_get
if _newclass:RandS = _swig_property(_cv.CvConDensation_RandS_get, _cv.CvConDensation_RandS_set)
__swig_destroy__ = _cv.delete_CvConDensation
__del__ = lambda self : None;
CvConDensation_swigregister = _cv.CvConDensation_swigregister
CvConDensation_swigregister(CvConDensation)
class CvKalman(_object):
"""Proxy of C++ CvKalman class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvKalman, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvKalman, name)
def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_setmethods__["MP"] = _cv.CvKalman_MP_set
__swig_getmethods__["MP"] = _cv.CvKalman_MP_get
if _newclass:MP = _swig_property(_cv.CvKalman_MP_get, _cv.CvKalman_MP_set)
__swig_setmethods__["DP"] = _cv.CvKalman_DP_set
__swig_getmethods__["DP"] = _cv.CvKalman_DP_get
if _newclass:DP = _swig_property(_cv.CvKalman_DP_get, _cv.CvKalman_DP_set)
__swig_setmethods__["CP"] = _cv.CvKalman_CP_set
__swig_getmethods__["CP"] = _cv.CvKalman_CP_get
if _newclass:CP = _swig_property(_cv.CvKalman_CP_get, _cv.CvKalman_CP_set)
__swig_setmethods__["PosterState"] = _cv.CvKalman_PosterState_set
__swig_getmethods__["PosterState"] = _cv.CvKalman_PosterState_get
if _newclass:PosterState = _swig_property(_cv.CvKalman_PosterState_get, _cv.CvKalman_PosterState_set)
__swig_setmethods__["PriorState"] = _cv.CvKalman_PriorState_set
__swig_getmethods__["PriorState"] = _cv.CvKalman_PriorState_get
if _newclass:PriorState = _swig_property(_cv.CvKalman_PriorState_get, _cv.CvKalman_PriorState_set)
__swig_setmethods__["DynamMatr"] = _cv.CvKalman_DynamMatr_set
__swig_getmethods__["DynamMatr"] = _cv.CvKalman_DynamMatr_get
if _newclass:DynamMatr = _swig_property(_cv.CvKalman_DynamMatr_get, _cv.CvKalman_DynamMatr_set)
__swig_setmethods__["MeasurementMatr"] = _cv.CvKalman_MeasurementMatr_set
__swig_getmethods__["MeasurementMatr"] = _cv.CvKalman_MeasurementMatr_get
if _newclass:MeasurementMatr = _swig_property(_cv.CvKalman_MeasurementMatr_get, _cv.CvKalman_MeasurementMatr_set)
__swig_setmethods__["MNCovariance"] = _cv.CvKalman_MNCovariance_set
__swig_getmethods__["MNCovariance"] = _cv.CvKalman_MNCovariance_get
if _newclass:MNCovariance = _swig_property(_cv.CvKalman_MNCovariance_get, _cv.CvKalman_MNCovariance_set)
__swig_setmethods__["PNCovariance"] = _cv.CvKalman_PNCovariance_set
__swig_getmethods__["PNCovariance"] = _cv.CvKalman_PNCovariance_get
if _newclass:PNCovariance = _swig_property(_cv.CvKalman_PNCovariance_get, _cv.CvKalman_PNCovariance_set)
__swig_setmethods__["KalmGainMatr"] = _cv.CvKalman_KalmGainMatr_set
__swig_getmethods__["KalmGainMatr"] = _cv.CvKalman_KalmGainMatr_get
if _newclass:KalmGainMatr = _swig_property(_cv.CvKalman_KalmGainMatr_get, _cv.CvKalman_KalmGainMatr_set)
__swig_setmethods__["PriorErrorCovariance"] = _cv.CvKalman_PriorErrorCovariance_set
__swig_getmethods__["PriorErrorCovariance"] = _cv.CvKalman_PriorErrorCovariance_get
if _newclass:PriorErrorCovariance = _swig_property(_cv.CvKalman_PriorErrorCovariance_get, _cv.CvKalman_PriorErrorCovariance_set)
__swig_setmethods__["PosterErrorCovariance"] = _cv.CvKalman_PosterErrorCovariance_set
__swig_getmethods__["PosterErrorCovariance"] = _cv.CvKalman_PosterErrorCovariance_get
if _newclass:PosterErrorCovariance = _swig_property(_cv.CvKalman_PosterErrorCovariance_get, _cv.CvKalman_PosterErrorCovariance_set)
__swig_setmethods__["Temp1"] = _cv.CvKalman_Temp1_set
__swig_getmethods__["Temp1"] = _cv.CvKalman_Temp1_get
if _newclass:Temp1 = _swig_property(_cv.CvKalman_Temp1_get, _cv.CvKalman_Temp1_set)
__swig_setmethods__["Temp2"] = _cv.CvKalman_Temp2_set
__swig_getmethods__["Temp2"] = _cv.CvKalman_Temp2_get
if _newclass:Temp2 = _swig_property(_cv.CvKalman_Temp2_get, _cv.CvKalman_Temp2_set)
__swig_setmethods__["state_pre"] = _cv.CvKalman_state_pre_set
__swig_getmethods__["state_pre"] = _cv.CvKalman_state_pre_get
if _newclass:state_pre = _swig_property(_cv.CvKalman_state_pre_get, _cv.CvKalman_state_pre_set)
__swig_setmethods__["state_post"] = _cv.CvKalman_state_post_set
__swig_getmethods__["state_post"] = _cv.CvKalman_state_post_get
if _newclass:state_post = _swig_property(_cv.CvKalman_state_post_get, _cv.CvKalman_state_post_set)
__swig_setmethods__["transition_matrix"] = _cv.CvKalman_transition_matrix_set
__swig_getmethods__["transition_matrix"] = _cv.CvKalman_transition_matrix_get
if _newclass:transition_matrix = _swig_property(_cv.CvKalman_transition_matrix_get, _cv.CvKalman_transition_matrix_set)
__swig_setmethods__["control_matrix"] = _cv.CvKalman_control_matrix_set
__swig_getmethods__["control_matrix"] = _cv.CvKalman_control_matrix_get
if _newclass:control_matrix = _swig_property(_cv.CvKalman_control_matrix_get, _cv.CvKalman_control_matrix_set)
__swig_setmethods__["measurement_matrix"] = _cv.CvKalman_measurement_matrix_set
__swig_getmethods__["measurement_matrix"] = _cv.CvKalman_measurement_matrix_get
if _newclass:measurement_matrix = _swig_property(_cv.CvKalman_measurement_matrix_get, _cv.CvKalman_measurement_matrix_set)
__swig_setmethods__["process_noise_cov"] = _cv.CvKalman_process_noise_cov_set
__swig_getmethods__["process_noise_cov"] = _cv.CvKalman_process_noise_cov_get
if _newclass:process_noise_cov = _swig_property(_cv.CvKalman_process_noise_cov_get, _cv.CvKalman_process_noise_cov_set)
__swig_setmethods__["measurement_noise_cov"] = _cv.CvKalman_measurement_noise_cov_set
__swig_getmethods__["measurement_noise_cov"] = _cv.CvKalman_measurement_noise_cov_get
if _newclass:measurement_noise_cov = _swig_property(_cv.CvKalman_measurement_noise_cov_get, _cv.CvKalman_measurement_noise_cov_set)
__swig_setmethods__["error_cov_pre"] = _cv.CvKalman_error_cov_pre_set
__swig_getmethods__["error_cov_pre"] = _cv.CvKalman_error_cov_pre_get
if _newclass:error_cov_pre = _swig_property(_cv.CvKalman_error_cov_pre_get, _cv.CvKalman_error_cov_pre_set)
__swig_setmethods__["gain"] = _cv.CvKalman_gain_set
__swig_getmethods__["gain"] = _cv.CvKalman_gain_get
if _newclass:gain = _swig_property(_cv.CvKalman_gain_get, _cv.CvKalman_gain_set)
__swig_setmethods__["error_cov_post"] = _cv.CvKalman_error_cov_post_set
__swig_getmethods__["error_cov_post"] = _cv.CvKalman_error_cov_post_get
if _newclass:error_cov_post = _swig_property(_cv.CvKalman_error_cov_post_get, _cv.CvKalman_error_cov_post_set)
__swig_setmethods__["temp1"] = _cv.CvKalman_temp1_set
__swig_getmethods__["temp1"] = _cv.CvKalman_temp1_get
if _newclass:temp1 = _swig_property(_cv.CvKalman_temp1_get, _cv.CvKalman_temp1_set)
__swig_setmethods__["temp2"] = _cv.CvKalman_temp2_set
__swig_getmethods__["temp2"] = _cv.CvKalman_temp2_get
if _newclass:temp2 = _swig_property(_cv.CvKalman_temp2_get, _cv.CvKalman_temp2_set)
__swig_setmethods__["temp3"] = _cv.CvKalman_temp3_set
__swig_getmethods__["temp3"] = _cv.CvKalman_temp3_get
if _newclass:temp3 = _swig_property(_cv.CvKalman_temp3_get, _cv.CvKalman_temp3_set)
__swig_setmethods__["temp4"] = _cv.CvKalman_temp4_set
__swig_getmethods__["temp4"] = _cv.CvKalman_temp4_get
if _newclass:temp4 = _swig_property(_cv.CvKalman_temp4_get, _cv.CvKalman_temp4_set)
__swig_setmethods__["temp5"] = _cv.CvKalman_temp5_set
__swig_getmethods__["temp5"] = _cv.CvKalman_temp5_get
if _newclass:temp5 = _swig_property(_cv.CvKalman_temp5_get, _cv.CvKalman_temp5_set)
__swig_destroy__ = _cv.delete_CvKalman
__del__ = lambda self : None;
CvKalman_swigregister = _cv.CvKalman_swigregister
CvKalman_swigregister(CvKalman)
class CvHaarFeature(_object):
"""Proxy of C++ CvHaarFeature class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvHaarFeature, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvHaarFeature, name)
__repr__ = _swig_repr
__swig_setmethods__["tilted"] = _cv.CvHaarFeature_tilted_set
__swig_getmethods__["tilted"] = _cv.CvHaarFeature_tilted_get
if _newclass:tilted = _swig_property(_cv.CvHaarFeature_tilted_get, _cv.CvHaarFeature_tilted_set)
__swig_getmethods__["rect"] = _cv.CvHaarFeature_rect_get
if _newclass:rect = _swig_property(_cv.CvHaarFeature_rect_get)
def __init__(self, *args):
"""__init__(self) -> CvHaarFeature"""
this = _cv.new_CvHaarFeature(*args)
"""
Updated on Sun Feb 25 15:26 2018
- tkinter library used to enable file selection dialogues for loading data and saving output
- options now specified with a dialogue box rather than a command line
Created on Thu Feb 06 17:29:27 2015
This program takes csv files containing voltage readings
(20,000 of them per file) and removes the extraneous and noisy data leaving
just the key points - those denoting the peak and trough of each spike.
The output is written to a new file.
The output csv file has the following format:
File, mV minimum, mV maximum, Hz
It's been useful to me if not anyone else.
Please note:
* The lowest range (default 0-50mV) of spike amplitudes is very, very noise-
prone. This means that the data, where it appears, should probably not be
trusted.
* This program has very little error handling and is liable to crash without
explanation if given bad data (such as CSV files containing non-numeric values). For
the envisioned use cases it should be okay but no promises are made.
Feel free to use, modify, etc. this code as you see fit.
@author: mj261
"""
import csv, math, os, re, time
import tkinter as tk
from tkinter import filedialog, messagebox, simpledialog
"""
Function definitions.
The actual program gets more comments, promise.
"""
# writes a list to a file - UNUSED
def writeStuff(aList, anotherList, fileToWriteTo):
i = 0
limit = len(aList)
with open(fileToWriteTo, 'wt', newline = '') as file:
write = csv.writer(file)
while i < limit:
data = [str(aList[i]), str(anotherList[i])]
write.writerow(data)
i += 1
file.close()
return
# returns a list of non-'clean_' CSV files for inspection
def getCSVs(folderPath):
returnlist = []
i = 0
print("Searching for .csv files in " + folderPath)
for dirpath, dirnames, files in os.walk(folderPath):
for file in files:
if re.match('^(?!clean_).+\\.csv$', file):
returnlist.append(os.path.join(dirpath, file))
i += 1
print("Found " + str(i) + " files.")
return returnlist
# returns a list of (stimulus, response) tuples
# stimulus is the value from the 2nd column of each row
# response is the value from the 1st column of each row
def getDataTuple(aFile):
retList = []
with open(aFile, 'rt', encoding='utf-8-sig') as csvfile:
myfile = csv.reader(csvfile)
for line in myfile:
tup = float(line[1]), float(line[0])
retList.append(tup)
return retList
# Extracts the stimulus data
def getStimulusList(tupleList):
stimulusList = []
if hasattr(tupleList, '__iter__'):
for tup in tupleList:
stimulusList.append(tup[0])
return stimulusList
# Extracts the response data
def getResponseList(tupleList):
responseList = []
if hasattr(tupleList, '__iter__'):
for tup in tupleList:
responseList.append(tup[1])
return responseList
# returns the mean of a list of floats
def getMean(aList):
mean = 0.0
i = 0
for value in aList:
i += 1
mean += value
return (mean / i)
# takes a list and an average and returns a list with 0s where the first list's
# value is lower than or equal to average and 1s where higher
def getZeroList(aList, aMean):
retList = []
for value in aList:
if value > aMean:
retList.append(1.0)
else:
retList.append(0.0)
return retList
# returns aList with each value replaced by replaceWith wherever the
# corresponding entry in zeroList is 0 (other values are left unchanged)
def flatten(aList, zeroList, replaceWith = 0.0):
i = 0
limit = len(aList)
if limit != len(zeroList):
return
while i < limit:
if zeroList[i] == 0.0:
aList[i] = replaceWith
else:
aList[i] = aList[i]
i += 1
return aList
# returns aList filled with values smoothed over the last num values
def smooth(aList, numValues = 5):
stack = []
outList = []
for value in aList:
if len(stack) >= numValues:
stack.pop(numValues-1)
stack.insert(0,value)
outList.append(sum(stack)/len(stack))
return outList
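# Example: a running mean over (up to) the last numValues points, e.g.
#   smooth([0, 0, 10, 0, 0], 2) -> [0.0, 0.0, 5.0, 5.0, 0.0]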
# returns the modal average of a list
def getMode(aList):
values = {}
for score in aList:
if score not in values:
values[score] = 1
else:
values[score] += 1
# pick the score with the highest count (ties resolved arbitrarily)
return max(values, key=values.get) if values else 0
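# Example: getMode([10, 20, 20]) -> 20 (the most frequent value wins)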
# splits a list along the mean, then assigns all values to the modal value of their group
def split(aList):
if len(aList) <= 0:
return aList
mean = sum(aList)/len(aList)
low = []
high = []
for value in aList:
if value > mean:
high.append(value)
elif value < mean:
low.append(value)
lowMode = getMode(low)
highMode = getMode(high)
i = 0
limit = len(aList)
while i < limit:
if aList[i] > mean:
aList[i] = highMode
elif aList[i] < mean:
aList[i] = lowMode
i += 1
return aList
# returns a list containing only the high and low points of spikes
def findExtremes(spikelist, average = 0.0):
returnList = []
i = 0
pending = average, i
lastval = average
# i = 0
mode = 0 # 1: expecting larger; -1: expecting smaller
# loop through looking for extreme points
for lineval in spikelist:
lineval = float(lineval)
# find the high point
if mode == 1:
if lineval > pending[0]:
#print("Pending: " + str(lineval))
pending = lineval, i
# find the low point
elif mode == -1:
if lineval < pending[0]:
#print("Pending: " + str(lineval))
pending = lineval, i
# work out whether to look for high or low values
if mode == 0:
if lineval > average:
mode = 1
if lineval < average:
mode = -1
pending = lineval, i
# when crossing the origin switch modes
elif math.copysign(1.0, (lineval - average)) != math.copysign(1.0, (lastval - average)):
if pending[0] != average:
returnList[pending[1]] = pending[0]
pending = lineval, i
if mode == -1:
mode = 1
#print("Now looking for high point.")
else:
mode = -1
#print("Now looking for low point.")
returnList.append(average)
lastval = lineval
i += 1
return returnList
class spike:
startPoint = 0
startValue = 0
endValue = 0
endPoint = 0
stimulusLevel = 0
number = 0
timeSinceLastSpike = 0
def getDuration(self):
return (self.endPoint - self.startPoint)
def getAmplitude(self):
return (math.copysign(self.endValue, 1) + math.copysign(self.startValue, 1))
# finds pairs within a list which constitute spikes and returns a spike object
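# Returns a spike object when a matching peak/trough pair is found; otherwise returns
# the index to resume searching from, or False once the end of the data has been reached.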
def getNextSpike(spikeList, listMean, searchFrom, lastSpikeTime, relativeSizeLimit = 0.5, maxDuration = 40):
mySpike = spike()
mySpike.startValue = listMean
i = searchFrom
limit = i + maxDuration
defaultOut = False
if limit > len(spikeList):
limit = len(spikeList)
else:
defaultOut = limit
while i < limit:
value = spikeList[i]
# first non-mean value is our peak
if value != listMean:
if mySpike.startValue == listMean:
mySpike.startValue = value
mySpike.startPoint = i
# second non-mean value is our prospective match
elif value < listMean < mySpike.startValue or value > listMean > mySpike.startValue:
posval = math.copysign(value - listMean, 1)
posstart = math.copysign(mySpike.startValue - listMean, 1)
if posval > posstart * (1-relativeSizeLimit) and posval < posstart * (1+relativeSizeLimit):
mySpike.endPoint = i
mySpike.endValue = value
# calculate timeSinceLastSpike
timeSince = mySpike.startPoint - lastSpikeTime
if lastSpikeTime == 0:
timeSince = 0.0
mySpike.timeSinceLastSpike = timeSince
return mySpike
# if it didn't make a spike we can stop
else:
return i
i += 1
# no 2nd non-mean point found: return False if the search window hit the end of the data, otherwise the index to resume from
return defaultOut
# fills in the stimulus level and number variables for all spikes in aList
def fillValues(aList, stimList):
i = 1
for aSpike in aList:
aSpike.stimulusLevel = stimList[aSpike.startPoint]
aSpike.number = i
i += 1
# writes one spike's data as a row of a CSV file suitable for spreadsheet analysis
def writeSpikeToCSV(aSpike, fromFile, outputFile):
headersWritten = os.path.isfile(outputFile)
with open(outputFile, 'at', newline = '') as file:
write = csv.writer(file)
# Write the headers
if headersWritten == False:
write.writerow(["File", "Spike #", "Stimulus", "Peak Point", "Amplitude", "Duration (ms)", "Time Since Last Spike"])
rowToWrite = [fromFile, aSpike.number, aSpike.stimulusLevel, aSpike.startPoint+1,
aSpike.getAmplitude(), aSpike.getDuration()/2, aSpike.timeSinceLastSpike]
write.writerow(rowToWrite)
file.close()
# If the file already exists get a new name for the output file
def promptForFileName(folder, outputFileName):
while os.path.isfile(os.path.join(folder, outputFileName)):
print("Output file already exists (\"" + outputFileName + "\")")
outputFileName = input("Please enter a new name for the output file: ")
if outputFileName[-4:] != ".csv":
outputFileName += ".csv"
if outputFileName[:6] != "clean_":
outputFileName = "clean_" + outputFileName
return outputFileName
class MyDialog(simpledialog.Dialog):
def body(self, master):
self.answered = False
self.title("legUp")
simpledialog.Label(master, text="queries -> <EMAIL>") \
.grid(row=0, column=1, columnspan=2)
intro = "Welcome to | |
:param order_by:
:param limit:
:return: A Pandas dataframe
"""
result_list = self.get_nodes(match=match, order_by=order_by, limit=limit)
return pd.DataFrame(result_list)
def find(self, labels=None, neo_id=None, key_name=None, key_value=None, properties=None, subquery=None,
dummy_node_name="n") -> dict:
"""
Register a set of conditions that must be matched to identify a node or nodes of interest,
and return a dictionary suitable to be passed as argument to various other functions in this library.
No arguments at all means "match everything in the database".
IMPORTANT: if neo_id is provided, all other conditions are DISREGARDED;
otherwise, an implicit AND applies to all the specified conditions.
Note: NO database operation is actually performed by this function.
It merely turns the set of specification into the MATCH part, and (if applicable) the WHERE part,
of a Cypher query (using the specified dummy variable name),
together with its data-binding dictionary - all "packaged" into a dict that can be passed around.
The calling functions typically will make use the returned dictionary to assemble a Cypher query,
to MATCH all the Neo4j nodes satisfying the specified conditions,
and then do whatever else it needs to do (such as deleting, or setting properties on, the located nodes.)
EXAMPLE 1 - first identify a group of nodes, and then delete them:
match = find(labels="person", properties={"gender": "F"},
subquery=("n.income > $min_income" , {"min_income": 50000})}
)
delete_nodes(match)
In the above example, the value of match is:
{"node": "(n :`person` {`gender`: $n_par_1})",
"where": "n.income > $min_income",
"data_binding": {"n_par_1": "F", "min_income": 50000},
"dummy_node_name": "n"
}
EXAMPLE 2 - by specifying the name of the dummy node, it's also possible to do operations such as:
# Add the new relationship
match_from = db.find(labels="car", key_name="vehicle_id", key_value=45678,
dummy_node_name="from")
match_to = db.find(labels="manufacturer", key_name="company", key_value="Toyota",
dummy_node_name="to")
db.add_edges(match_from, match_to, rel_name="MADE_BY")
TODO? - possible alt. names for this function include define_match(), match(), locate(), choose() or identify()
ALL THE ARGUMENTS ARE OPTIONAL (no arguments at all means "match everything in the database")
:param labels: A string (or list/tuple of strings) specifying one or more Neo4j labels.
(Note: blank spaces ARE allowed in the strings)
EXAMPLES: "cars"
("cars", "powered vehicles")
Note that if multiple labels are given, then only nodes with ALL of them will be matched;
at present, there's no way to request an "OR" operation
:param neo_id: An integer with the node's internal ID.
If specified, it OVER-RIDES all the remaining arguments, except for the labels
:param key_name: A string with the name of a node attribute; if provided, key_value must be present, too
:param key_value: The required value for the above key; if provided, key_name must be present, too
Note: no requirement for the key to be primary
:param properties: A (possibly-empty) dictionary of property key/values pairs, indicating a condition to match.
EXAMPLE: {"gender": "F", "age": 22}
:param subquery: Either None, or a (possibly empty) string containing a Cypher subquery,
or a pair/list (string, dict) containing a Cypher subquery and the data-binding dictionary for it.
The Cypher subquery should refer to the node using the assigned dummy_node_name (by default, "n")
IMPORTANT: in the dictionary, don't use keys of the form "n_par_i",
where n is the dummy node name and i is an integer,
or an Exception will be raised - those names are for internal use only
EXAMPLES: "n.age < 25 AND n.income > 100000"
("n.weight < $max_weight", {"max_weight": 100})
:param dummy_node_name: A string with a name by which to refer to the node (by default, "n")
:return: A dictionary of data storing the parameters of the match.
For details, see the "class Matches"
"""
if self.debug:
print(f"In find(). labels={labels}, neo_id={neo_id}, key_name={key_name}, key_value={key_value}, "
f"properties={properties}, subquery={subquery}, dummy_node_name={dummy_node_name}")
match_structure = CypherUtils.define_match(labels=labels, neo_id=neo_id, key_name=key_name, key_value=key_value,
properties=properties, subquery=subquery, dummy_node_name=dummy_node_name)
if self.debug:
print("\n*** match_structure : ", match_structure)
return match_structure
def get_node_labels(self, neo4j_id: int) -> [str]:
"""
Return a list whose elements are the label(s) of the node specified by its Neo4j internal ID
TODO: maybe also accept a "match" structure as argument
:param neo4j_id: An integer with a Neo4j node id
:return: A list of strings with the names of the labels of the specified node
"""
assert type(neo4j_id) == int and neo4j_id >= 0, \
"The argument of get_node_labels() must be a non-negative integer"
q = "MATCH (n) WHERE id(n)=$neo4j_id RETURN labels(n) AS all_labels"
return self.query(q, data_binding={"neo4j_id": neo4j_id}, single_cell="all_labels")
#---------------------------------------------------------------------------------------------------#
# #
# ~ FOLLOW LINKS ~ #
# #
#___________________________________________________________________________________________________#
def follow_links(self, match: Union[int, dict], rel_name: str, rel_dir ="OUT", neighbor_labels = None) -> [dict]:
"""
From the given starting node(s), follow all the relationships of the given name to and/or from it,
into/from neighbor nodes (optionally having the given labels),
and return all the properties of those neighbor nodes.
:param match: EITHER an integer with a Neo4j node id,
OR a dictionary of data to identify a node, or set of nodes, as returned by find()
:param rel_name: A string with the name of the relationship to follow. (Note: any other relationships are ignored)
:param rel_dir: Either "OUT"(default), "IN" or "BOTH". Direction(s) of the relationship to follow
:param neighbor_labels: Optional label(s) required on the neighbors. If present, either a string or list of strings
:return: A list of dictionaries with all the properties of the neighbor nodes
TODO: maybe add the option to just return a subset of fields
"""
#CypherUtils.assert_valid_match_structure(match) # Validate the match dictionary
match = CypherUtils.validate_and_standardize(match) # Validate, and possibly create, the match dictionary
# Unpack needed values from the match dictionary
(node, where, data_binding) = CypherUtils.unpack_match(match, include_dummy=False)
neighbor_labels_str = CypherUtils.prepare_labels(neighbor_labels) # EXAMPLE: ":`CAR`:`INVENTORY`"
if rel_dir == "OUT": # Follow outbound links
q = f"MATCH {node} - [:{rel_name}] -> (neighbor {neighbor_labels_str})"
elif rel_dir == "IN": # Follow inbound links
q = f"MATCH {node} <- [:{rel_name}] - (neighbor {neighbor_labels_str})"
else: # Follow links in BOTH directions
q = f"MATCH {node} - [:{rel_name}] - (neighbor {neighbor_labels_str})"
q += CypherUtils.prepare_where(where) + " RETURN neighbor"
self.debug_print(q, data_binding, "follow_links")
result = self.query(q, data_binding, single_column='neighbor')
return result
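# Hedged usage sketch (assumes a NeoAccess-style instance `db` and the example data
# from find() above; node labels and relationship names are illustrative only):
#   match = db.find(labels="car", key_name="vehicle_id", key_value=45678)
#   makers = db.follow_links(match, rel_name="MADE_BY", rel_dir="OUT",
#                            neighbor_labels="manufacturer")
#   # -> a list of dicts holding the properties of the matching manufacturer nodes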
def count_links(self, match: Union[int, dict], rel_name: str, rel_dir: str, neighbor_labels = None) -> int:
"""
From the given starting node(s), count all the relationships OF THE GIVEN NAME to and/or from it,
into/from neighbor nodes (optionally having the given labels)
:param match: EITHER an integer with a Neo4j node id,
OR a dictionary of data to identify a node, or set of nodes, as returned by find()
:param rel_name: A string with the name of the relationship to follow. (Note: any other relationships are ignored)
:param rel_dir: Either "OUT"(default), "IN" or "BOTH". Direction(s) of the relationship to follow
:param neighbor_labels: Optional label(s) required on the neighbors. If present, either a string or list of strings
:return: The total number of inbound and/or outbound relationships to the given node(s)
"""
match = CypherUtils.validate_and_standardize(match) # Validate, and possibly create, the match dictionary
# Unpack needed values from the match dictionary
(node, where, data_binding) = CypherUtils.unpack_match(match, include_dummy=False)
neighbor_labels_str = CypherUtils.prepare_labels(neighbor_labels) # EXAMPLE: ":`CAR`:`INVENTORY`"
if rel_dir == "OUT": # Follow outbound links
q = f"MATCH {node} - [:{rel_name}] -> (neighbor {neighbor_labels_str})"
elif rel_dir == "IN": # Follow inbound links
q = f"MATCH {node} <- [:{rel_name}] - (neighbor {neighbor_labels_str})"
elif rel_dir == "BOTH": # Follow links in BOTH directions
q = f"MATCH {node} - [:{rel_name}] - (neighbor {neighbor_labels_str})"
else:
raise Exception(f"count_links(): argument `rel_dir` must be one of: 'IN', 'OUT', 'BOTH'; value passed was `{rel_dir}`")
q += CypherUtils.prepare_where(where) + " RETURN count(neighbor) AS link_count"
self.debug_print(q, data_binding, "count_links")
return self.query(q, data_binding, single_cell="link_count")
def get_parents_and_children(self, node_id: int) -> ():
"""
Fetch all the nodes connected to the given one by INbound relationships to it (its "parents"),
as well as by OUTbound relationships to it (its "children")
:param node_id: An integer with a Neo4j internal node ID
:return: A dictionary with 2 keys: 'parent_list' and 'child_list'
The values are lists of dictionaries with 3 keys: "id", "label", "rel"
EXAMPLE of individual items in either parent_list or child_list:
{'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}
"""
# Fetch the parents
cypher = f"MATCH (parent)-[inbound]->(n) WHERE id(n) | |
# model_id
self.model_id = model_id
def validate(self):
self.validate_required(self.id, 'id')
self.validate_required(self.model_id, 'model_id')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.id is not None:
result['id'] = self.id
if self.model_id is not None:
result['model_id'] = self.model_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('id') is not None:
self.id = m.get('id')
if m.get('model_id') is not None:
self.model_id = m.get('model_id')
return self
class GetItemResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: Item = None,
):
# Unique request ID, used for call tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally indicates a successful call
self.result_code = result_code
# Text description of the error, if any
self.result_msg = result_msg
# data
self.data = data
def validate(self):
if self.data:
self.data.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.data is not None:
result['data'] = self.data.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('data') is not None:
temp_model = Item()
self.data = temp_model.from_map(m['data'])
return self
class GetModelRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
unique_id: str = None,
):
# Authorization token when using OAuth mode
self.auth_token = auth_token
# Unique identifier of the model to fetch
self.unique_id = unique_id
def validate(self):
self.validate_required(self.unique_id, 'unique_id')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.unique_id is not None:
result['unique_id'] = self.unique_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
return self
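# --- Illustrative round-trip sketch (not part of the generated SDK) ---
# The request/response models in this module all follow the same TeaModel
# pattern: construct with keyword arguments, serialize with to_map(), and
# rebuild with from_map(). A hedged example using GetModelRequest, with
# placeholder field values:
#
#   req = GetModelRequest(auth_token='demo-token', unique_id='my_model')
#   payload = req.to_map()            # {'auth_token': 'demo-token', 'unique_id': 'my_model'}
#   clone = GetModelRequest().from_map(payload)
#   assert clone.unique_id == 'my_model'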
class GetModelResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: Model = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; generally OK indicates a successful call
self.result_code = result_code
# Text description of the error, if any
self.result_msg = result_msg
# The fetched model
self.data = data
def validate(self):
if self.data:
self.data.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.data is not None:
result['data'] = self.data.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('data') is not None:
temp_model = Model()
self.data = temp_model.from_map(m['data'])
return self
class QueryModelRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
):
# Authorization token when using OAuth mode
self.auth_token = auth_token
def validate(self):
pass
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
return self
class QueryModelResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: List[Model] = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; generally OK indicates a successful call
self.result_code = result_code
# Text description of the error, if any
self.result_msg = result_msg
# The list of models returned by the query
self.data = data
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = Model()
self.data.append(temp_model.from_map(k))
return self
class CreateModelRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
unique_id: str = None,
name: str = None,
category_id: str = None,
description: str = None,
priority: int = None,
display_properties: List[MapStringToStringEntry] = None,
):
# Authorization token when using OAuth mode
self.auth_token = auth_token
# Unique identifier (globally unique)
self.unique_id = unique_id
# Name (globally unique)
self.name = name
# Unique identifier of the model category this model belongs to
self.category_id = category_id
# Description
self.description = description
# Used for front-end display ordering; the larger the number, the higher the priority; defaults to 0
self.priority = priority
# Extended properties used for front-end display
self.display_properties = display_properties
def validate(self):
self.validate_required(self.unique_id, 'unique_id')
self.validate_required(self.name, 'name')
self.validate_required(self.category_id, 'category_id')
if self.display_properties:
for k in self.display_properties:
if k:
k.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.unique_id is not None:
result['unique_id'] = self.unique_id
if self.name is not None:
result['name'] = self.name
if self.category_id is not None:
result['category_id'] = self.category_id
if self.description is not None:
result['description'] = self.description
if self.priority is not None:
result['priority'] = self.priority
result['display_properties'] = []
if self.display_properties is not None:
for k in self.display_properties:
result['display_properties'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('category_id') is not None:
self.category_id = m.get('category_id')
if m.get('description') is not None:
self.description = m.get('description')
if m.get('priority') is not None:
self.priority = m.get('priority')
self.display_properties = []
if m.get('display_properties') is not None:
for k in m.get('display_properties'):
temp_model = MapStringToStringEntry()
self.display_properties.append(temp_model.from_map(k))
return self
class CreateModelResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: Model = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; generally OK indicates a successful call
self.result_code = result_code
# Text description of the error, if any
self.result_msg = result_msg
# The newly created model
self.data = data
def validate(self):
if self.data:
self.data.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.data is not None:
result['data'] = self.data.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('data') is not None:
temp_model = Model()
self.data = temp_model.from_map(m['data'])
return self
class UpdateModelRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
unique_id: str = None,
name: str = None,
description: str = None,
priority: int = None,
display_properties: List[MapStringToStringEntry] = None,
):
# Authorization token when using OAuth mode
self.auth_token = auth_token
# Unique identifier of the model to update
self.unique_id = unique_id
# Name (globally unique)
self.name = name
# Description
self.description = description
# Used for front-end display ordering; the larger the number, the higher the priority; defaults to 0
self.priority = priority
# Extended properties used for front-end display
self.display_properties = display_properties
def validate(self):
self.validate_required(self.unique_id, 'unique_id')
self.validate_required(self.name, 'name')
if self.display_properties:
for k in self.display_properties:
if k:
k.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.unique_id is not None:
result['unique_id'] = self.unique_id
if self.name is not None:
result['name'] = self.name
if self.description is not None:
result['description'] = self.description
if self.priority is not None:
result['priority'] = self.priority
result['display_properties'] = []
if self.display_properties is not None:
for k in self.display_properties:
result['display_properties'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('description') is not None:
self.description = m.get('description')
if m.get('priority') is not None:
self.priority = m.get('priority')
self.display_properties = []
if m.get('display_properties') is not None:
for k in m.get('display_properties'):
temp_model = MapStringToStringEntry()
self.display_properties.append(temp_model.from_map(k))
return self
class UpdateModelResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: Model = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; generally OK indicates a successful call
self.result_code = result_code
# Text description of the error, if any
self.result_msg = result_msg
# The updated model
self.data = data
def validate(self):
if self.data:
self.data.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
function(opt) {
var viz = this.viz;
var graph = viz.graph;
var animation = this.nodeFxAnimation;
var nodes = $.merge(this.viz.config, {
elements : {
id : false,
properties : {}
},
reposition : false
});
opt = $.merge(nodes, opt || {}, {
/** @type {function (): undefined} */
onBeforeCompute : $.empty,
/** @type {function (): undefined} */
onAfterCompute : $.empty
});
animation.stopTimer();
var props = opt.elements.properties;
if (!opt.elements.id) {
graph.eachNode(function(n) {
var prop;
for (prop in props) {
n.setData(prop, props[prop], "end");
}
});
} else {
var attributes = $.splat(opt.elements.id);
$.each(attributes, function(node) {
var n = graph.getNode(node);
if (n) {
var prop;
for (prop in props) {
n.setData(prop, props[prop], "end");
}
}
});
}
/** @type {Array} */
var assigns = [];
var vvar;
for (vvar in props) {
assigns.push(vvar);
}
/** @type {Array} */
var modes = ["node-property:" + assigns.join(":")];
if (opt.reposition) {
modes.push("linear");
viz.compute("end");
}
this.animate($.merge(opt, {
modes : modes,
type : "nodefx"
}));
},
/**
* @param {?} opt
* @param {boolean} animating
* @return {undefined}
*/
plot : function(opt, animating) {
var viz = this.viz;
var graph = viz.graph;
var canvas = viz.canvas;
var id = viz.root;
var that = this;
var F = canvas.getCtx();
/** @type {function (...[*]): number} */
var min = Math.min;
opt = opt || this.viz.controller;
if (opt.clearCanvas) {
canvas.clear();
}
var n = graph.getNode(id);
if (!n) {
return;
}
/** @type {boolean} */
var T = !!n.visited;
graph.eachNode(function(from) {
var nodeAlpha = from.getData("alpha");
from.eachAdjacency(function(adj) {
var nodeTo = adj.nodeTo;
if (!!nodeTo.visited === T && (from.drawn && nodeTo.drawn)) {
if (!animating) {
opt.onBeforePlotLine(adj);
}
that.plotLine(adj, canvas, animating);
if (!animating) {
opt.onAfterPlotLine(adj);
}
}
});
if (from.drawn) {
if (!animating) {
opt.onBeforePlotNode(from);
}
that.plotNode(from, canvas, animating);
if (!animating) {
opt.onAfterPlotNode(from);
}
}
if (!that.labelsHidden && opt.withLabels) {
if (from.drawn && nodeAlpha >= 0.95) {
that.labels.plotLabel(canvas, from, opt);
} else {
that.labels.hideLabel(from, false);
}
}
/** @type {boolean} */
from.visited = !T;
});
},
/**
* @param {?} npos
* @param {?} opt
* @param {boolean} animating
* @return {undefined}
*/
plotTree : function(npos, opt, animating) {
var that = this;
var viz = this.viz;
var canvas = viz.canvas;
var config = this.config;
var D = canvas.getCtx();
var nodeAlpha = npos.getData("alpha");
npos.eachSubnode(function(elem) {
if (opt.plotSubtree(npos, elem) && (elem.exist && elem.drawn)) {
var from = npos.getAdjacency(elem.id);
if (!animating) {
opt.onBeforePlotLine(from);
}
that.plotLine(from, canvas, animating);
if (!animating) {
opt.onAfterPlotLine(from);
}
that.plotTree(elem, opt, animating);
}
});
if (npos.drawn) {
if (!animating) {
opt.onBeforePlotNode(npos);
}
this.plotNode(npos, canvas, animating);
if (!animating) {
opt.onAfterPlotNode(npos);
}
if (!opt.hideLabels && (opt.withLabels && nodeAlpha >= 0.95)) {
this.labels.plotLabel(canvas, npos, opt);
} else {
this.labels.hideLabel(npos, false);
}
} else {
this.labels.hideLabel(npos, true);
}
},
/**
* @param {?} node
* @param {?} canvas
* @param {boolean} animating
* @return {undefined}
*/
plotNode : function(node, canvas, animating) {
var f = node.getData("type");
var ctxObj = this.node.CanvasStyles;
if (f != "none") {
var width = node.getData("lineWidth");
var color = node.getData("color");
var alpha = node.getData("alpha");
var ctx = canvas.getCtx();
ctx.save();
ctx.lineWidth = width;
ctx.fillStyle = ctx.strokeStyle = color;
ctx.globalAlpha = alpha;
var s;
for (s in ctxObj) {
ctx[s] = node.getCanvasStyle(s);
}
this.nodeTypes[f].render.call(this, node, canvas, animating);
ctx.restore();
}
},
/**
* @param {?} adj
* @param {?} canvas
* @param {boolean} animating
* @return {undefined}
*/
plotLine : function(adj, canvas, animating) {
var f = adj.getData("type");
var ctxObj = this.edge.CanvasStyles;
if (f != "none") {
var width = adj.getData("lineWidth");
var color = adj.getData("color");
var ctx = canvas.getCtx();
var nodeFrom = adj.nodeFrom;
var nodeTo = adj.nodeTo;
ctx.save();
ctx.lineWidth = width;
ctx.fillStyle = ctx.strokeStyle = color;
/** @type {number} */
ctx.globalAlpha = Math.min(nodeFrom.getData("alpha"), nodeTo.getData("alpha"), adj.getData("alpha"));
var s;
for (s in ctxObj) {
ctx[s] = adj.getCanvasStyle(s);
}
this.edgeTypes[f].render.call(this, adj, canvas, animating);
ctx.restore();
}
}
};
Graph.Plot3D = $.merge(Graph.Plot, {
Interpolator : {
/**
* @param {Object} n
* @param {?} diff
* @param {?} qualifier
* @return {undefined}
*/
linear : function(n, diff, qualifier) {
var from = n.startPos.getc(true);
var to = n.endPos.getc(true);
n.pos.setc(this.compute(from.x, to.x, qualifier), this.compute(from.y, to.y, qualifier), this.compute(from.z, to.z, qualifier));
}
},
/**
* @param {Object} node
* @param {Object} canvas
* @return {undefined}
*/
plotNode : function(node, canvas) {
if (node.getData("type") == "none") {
return;
}
this.plotElement(node, canvas, {
/**
* @return {?}
*/
getAlpha : function() {
return node.getData("alpha");
}
});
},
/**
* @param {Object} adj
* @param {Object} canvas
* @return {undefined}
*/
plotLine : function(adj, canvas) {
if (adj.getData("type") == "none") {
return;
}
this.plotElement(adj, canvas, {
/**
* @return {?}
*/
getAlpha : function() {
return Math.min(adj.nodeFrom.getData("alpha"), adj.nodeTo.getData("alpha"), adj.getData("alpha"));
}
});
},
/**
* @param {Object} elem
* @param {Object} canvas
* @param {?} opt_attributes
* @return {undefined}
*/
plotElement : function(elem, canvas, opt_attributes) {
var gl = canvas.getCtx();
var viewMatrix = new Matrix4;
var lighting = canvas.config.Scene.Lighting;
var wcanvas = canvas.canvases[0];
var program = wcanvas.program;
var camera = wcanvas.camera;
if (!elem.geometry) {
elem.geometry = new (O3D[elem.getData("type")]);
}
elem.geometry.update(elem);
if (!elem.webGLVertexBuffer) {
/** @type {Array} */
var normals = [];
/** @type {Array} */
var faces = [];
/** @type {Array} */
var positions = [];
/** @type {number} */
var vertexIndex = 0;
var geom = elem.geometry;
/** @type {number} */
var i = 0;
var vs = geom.vertices;
var fs = geom.faces;
var len = fs.length;
for (;i < len;i++) {
var face = fs[i];
var normal = vs[face.a];
var n = vs[face.b];
var v1 = vs[face.c];
var p0 = face.d ? vs[face.d] : false;
var position = face.normal;
normals.push(normal.x, normal.y, normal.z);
normals.push(n.x, n.y, n.z);
normals.push(v1.x, v1.y, v1.z);
if (p0) {
normals.push(p0.x, p0.y, p0.z);
}
positions.push(position.x, position.y, position.z);
positions.push(position.x, position.y, position.z);
positions.push(position.x, position.y, position.z);
if (p0) {
positions.push(position.x, position.y, position.z);
}
faces.push(vertexIndex, vertexIndex + 1, vertexIndex + 2);
if (p0) {
faces.push(vertexIndex, vertexIndex + 2, vertexIndex + 3);
vertexIndex += 4;
} else {
vertexIndex += 3;
}
}
elem.webGLVertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, elem.webGLVertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(normals), gl.STATIC_DRAW);
elem.webGLFaceBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, elem.webGLFaceBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(faces), gl.STATIC_DRAW);
/** @type {number} */
elem.webGLFaceCount = faces.length;
elem.webGLNormalBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, elem.webGLNormalBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
}
viewMatrix.multiply(camera.matrix, elem.geometry.matrix);
gl.uniformMatrix4fv(program.viewMatrix, false, viewMatrix.flatten());
gl.uniformMatrix4fv(program.projectionMatrix, false, camera.projectionMatrix.flatten());
var normalMatrix = Matrix4.makeInvert(viewMatrix);
normalMatrix.$transpose();
gl.uniformMatrix4fv(program.normalMatrix, false, normalMatrix.flatten());
var color = $.hexToRgb(elem.getData("color"));
color.push(opt_attributes.getAlpha());
gl.uniform4f(program.color, color[0] / 255, color[1] / 255, color[2] / 255, color[3]);
gl.uniform1i(program.enableLighting, lighting.enable);
if (lighting.enable) {
if (lighting.ambient) {
var acolor = lighting.ambient;
gl.uniform3f(program.ambientColor, acolor[0], acolor[1], acolor[2]);
}
if (lighting.directional) {
var dir = lighting.directional;
color = dir.color;
var pos = dir.direction;
var vd = (new Vector3(pos.x, pos.y, pos.z)).normalize().$scale(-1);
gl.uniform3f(program.lightingDirection, vd.x, vd.y, vd.z);
gl.uniform3f(program.directionalColor, color[0], color[1], color[2]);
}
}
gl.bindBuffer(gl.ARRAY_BUFFER, elem.webGLVertexBuffer);
gl.vertexAttribPointer(program.position, 3, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, elem.webGLNormalBuffer);
gl.vertexAttribPointer(program.normal, 3, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, elem.webGLFaceBuffer);
gl.drawElements(gl.TRIANGLES, elem.webGLFaceCount, gl.UNSIGNED_SHORT, 0);
}
});
Graph.Label = {};
Graph.Label.Native = new Class({
/**
* @param {?} viz
* @return {undefined}
*/
initialize : function(viz) {
this.viz = viz;
},
/**
* @param {?} canvas
* @param {Object} node
* @param {?} opt
* @return {undefined}
*/
plotLabel : function(canvas, node, opt) {
var ctx = canvas.getCtx();
var A = node.pos.getc(true);
ctx.font = node.getLabelData("style") + " " + node.getLabelData("size") + "px " + node.getLabelData("family");
ctx.textAlign = node.getLabelData("textAlign");
ctx.fillStyle = ctx.strokeStyle = node.getLabelData("color");
ctx.textBaseline = node.getLabelData("textBaseline");
this.renderLabel(canvas, node, opt);
},
/**
* @param {?} canvas
* @param {Object} node
* @param {?} opt
* @return {undefined}
*/
renderLabel : function(canvas, node, opt) {
var ctx = canvas.getCtx();
var pt = node.pos.getc(true);
ctx.fillText(node.name, pt.x, pt.y + node.getData("height") / 2);
},
/** @type {function (): undefined} */
hideLabel : $.empty,
/** @type {function (): undefined} */
hideLabels : $.empty
});
Graph.Label.DOM = new Class({
labelsHidden : false,
labelContainer : false,
labels : {},
/**
* @return {?}
*/
getLabelContainer : function() {
return this.labelContainer ? this.labelContainer : this.labelContainer = document.getElementById(this.viz.config.labelContainer);
},
/**
* @param {?} id
* @return {?}
*/
getLabel : function(id)
#
# Imports (JSON library based on import try)
import sys
from postmark import __version__
try:
from email.mime.base import MIMEBase
except ImportError as e:
from email import MIMEBase
if sys.version_info[0] < 3:
from urllib2 import Request, urlopen, HTTPError, URLError
from httplib import HTTPConnection
from urllib import urlencode
else:
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from http.client import HTTPConnection
from urllib.parse import urlencode
from contextlib import closing
try:
import simplejson as json
except ImportError:
try:
# Try django first, for speedups, since we support the backend
from django.utils import simplejson as json
except ImportError:
import json
#
#
class PMJSONEncoder(json.JSONEncoder):
def default(self, o):
# Django lazy-translation proxies expose this attribute; `unicode` only exists on Python 2
if hasattr(o, '_proxy____unicode_cast'):
return unicode(o) if sys.version_info[0] < 3 else str(o)
return super(PMJSONEncoder, self).default(o)
#
#
__POSTMARK_URL__ = 'https://api.postmarkapp.com/'
#
#
class PMMail(object):
'''
The Postmark Mail object.
'''
def __init__(self, **kwargs):
'''
Keyword arguments are:
api_key: Your Postmark server API key
sender: Who the email is coming from, in either
"<EMAIL>" or "First Last <<EMAIL>>" format
to: Who to send the email to, in either
"<EMAIL>" or "First Last <<EMAIL>>" format
Can be multiple values separated by commas (limit 20)
cc: Who to copy the email to, in either
"<EMAIL>" or "First Last <<EMAIL>>" format
Can be multiple values separated by commas (limit 20)
bcc: Who to blind copy the email to, in either
"<EMAIL>" or "First Last <<EMAIL>>" format
Can be multiple values separated by commas (limit 20)
subject: Subject of the email, ignored if using Postmark templates
tag: Use for adding categorizations to your email
html_body: Email message in HTML
text_body: Email message in plain text
track_opens: Whether or not to track if emails were opened
custom_headers: A dictionary of key-value pairs of custom headers.
attachments: A list of tuples or email.mime.base.MIMEBase objects describing attachments.
metadata: A dictionary of key-value pairs of custom metadata. Keys and values can only be strings or ints.
template_id: id of Postmark template. See: https://postmarkapp.com/blog/special-delivery-postmark-templates
template_model: a dictionary containing the values to be loaded into the template
'''
# initialize properties
self.__api_key = None
self.__sender = None
self.__reply_to = None
self.__to = None
self.__cc = None
self.__bcc = None
self.__subject = None
self.__tag = None
self.__html_body = None
self.__text_body = None
self.__track_opens = False
self.__custom_headers = {}
self.__attachments = []
self.__message_id = None
# self.__multipart = False
self.__metadata = {}
self.__template_id = None
self.__template_model = None
acceptable_keys = (
'api_key',
'sender',
'reply_to',
'to', 'recipient', # 'recipient' is legacy
'cc',
'bcc',
'subject',
'tag',
'html_body',
'text_body',
'track_opens',
'custom_headers',
'attachments',
# 'multipart',
'metadata',
'template_id',
'template_model'
)
for key in kwargs:
if key in acceptable_keys:
if key == 'recipient':
setattr(self, 'to', kwargs[key])
else:
setattr(self, key, kwargs[key])
# Set up the user-agent
self.__user_agent = 'Python/%s (python-postmark library version %s)' % ('_'.join([str(var) for var in sys.version_info]), __version__)
# Try to pull in the API key from Django
try:
from django import VERSION
from django.conf import settings as django_settings
self.__user_agent = '%s (Django %s)' % (self.__user_agent, '_'.join([str(var) for var in VERSION]))
if not self.__api_key and hasattr(django_settings, 'POSTMARK_API_KEY'):
self.__api_key = django_settings.POSTMARK_API_KEY
if not self.__sender and hasattr(django_settings, 'POSTMARK_SENDER'):
self.__sender = django_settings.POSTMARK_SENDER
if not self.__track_opens and hasattr(django_settings, 'POSTMARK_TRACK_OPENS'):
self.__track_opens = django_settings.POSTMARK_TRACK_OPENS
except ImportError:
pass
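# --- Illustrative usage sketch (not part of this class) ---
# A hedged example of constructing a message with the keyword arguments
# documented in __init__ above; the addresses and API key are placeholders,
# and the send() call is an assumption about a method defined elsewhere in
# this library that performs the HTTP POST to the Postmark API.
#
#   message = PMMail(api_key='POSTMARK_API_TEST',
#                    sender='sender@example.com',
#                    to='recipient@example.com',
#                    subject='Hello',
#                    text_body='Plain-text body',
#                    tag='welcome')
#   message.send()   # assumed to exist later in this module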
@property
def messages(self):
"""Convenience method to mimic batch messages property, return list of self."""
return [self]
#
# Properties
def _set_custom_headers(self, value):
'''
A special set function to ensure
we're setting with a dictionary
'''
if value is None:
setattr(self, '_PMMail__custom_headers', {})
elif isinstance(value, dict):
setattr(self, '_PMMail__custom_headers', value)
else:
raise TypeError('Custom headers must be a dictionary of key-value pairs')
def _set_metadata(self, value):
'''
A special set function to ensure
we're setting with a dictionary
'''
if value is None:
setattr(self, '_PMMail__metadata', {})
elif isinstance(value, dict):
for k, v in value.items():
if (not isinstance(k, str) and not isinstance(k, int)) \
or (not isinstance(v, str) and not isinstance(v, int)):
raise TypeError('Metadata keys and values can only be strings or integers')
setattr(self, '_PMMail__metadata', value)
else:
raise TypeError('Metadata must be a dictionary of key-value pairs')
def _set_attachments(self, value):
'''
A special set function to ensure
we're setting with a list
'''
if value is None:
setattr(self, '_PMMail__attachments', [])
elif isinstance(value, list):
setattr(self, '_PMMail__attachments', value)
else:
raise TypeError('Attachments must be a list')
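# --- Illustrative note on the attachments format (hedged, not authoritative) ---
# Per the __init__ docstring above, attachments may be email.mime.base.MIMEBase
# objects or tuples; the tuple layout shown here (file name, base64-encoded
# content, content type) is an assumption for illustration only.
#
#   import base64
#   content = base64.b64encode(b'hello world').decode('ascii')
#   message.attachments = [('hello.txt', content, 'text/plain')]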
api_key = property(
lambda self: self.__api_key,
lambda self, value: setattr(self, '_PMMail__api_key', value),
lambda self: setattr(self, '_PMMail__api_key', None),
'''
The API Key for your rack server on Postmark
'''
)
# "from" is a reserved word
sender = property(
lambda self: self.__sender,
lambda self, value: setattr(self, '_PMMail__sender', value),
lambda self: setattr(self, '_PMMail__sender', None),
'''
The sender, in either "<EMAIL>" or "<NAME> <<EMAIL>>" formats.
The address should match one of your Sender Signatures in Postmark.
Specifying the address in the second fashion will allow you to replace
the name of the sender as it appears in the recipient's email client.
'''
)
reply_to = property(
lambda self: self.__reply_to,
lambda self, value: setattr(self, '_PMMail__reply_to', value),
lambda self: setattr(self, '_PMMail__reply_to', None),
'''
A reply-to address, in either "<EMAIL>" or "<NAME> <<EMAIL>>"
format. The reply-to address does not have to be one of your Sender Signatures in Postmark.
This allows a different reply-to address than sender address.
'''
)
to = property(
lambda self: self.__to,
lambda self, value: setattr(self, '_PMMail__to', value),
lambda self: setattr(self, '_PMMail__to', None),
'''
The recipients, in either "<EMAIL>" or "<NAME> <<EMAIL>>" formats
'''
)
cc = property(
lambda self: self.__cc,
lambda self, value: setattr(self, '_PMMail__cc', value),
lambda self: setattr(self, '_PMMail__cc', None),
'''
The cc recipients, in either "<EMAIL>" or "<NAME> <<EMAIL>>" formats
'''
)
bcc = property(
lambda self: self.__bcc,
lambda self, value: setattr(self, '_PMMail__bcc', value),
lambda self: setattr(self, '_PMMail__bcc', None),
'''
The bcc recipients, in either "<EMAIL>" or "<NAME> <<EMAIL>>" formats
'''
)
subject = property(
lambda self: self.__subject,
lambda self, value: setattr(self, '_PMMail__subject', value),
lambda self: setattr(self, '_PMMail__subject', None),
'''
The subject of your email message
'''
)
tag = property(
lambda self: self.__tag,
lambda self, value: setattr(self, '_PMMail__tag', value),
lambda self: setattr(self, '_PMMail__tag', None),
'''
You can categorize outgoing email using the optional Tag property.
If you use different tags for the different types of emails your application generates,
you will be able to get detailed statistics for them through the Postmark user interface.
'''
)
html_body = property(
lambda self: self.__html_body,
lambda self, value: setattr(self, '_PMMail__html_body', value),
lambda self: setattr(self, '_PMMail__html_body', None),
'''
The email message body, in html format
'''
)
text_body = property(
lambda self: self.__text_body,
lambda self, value: setattr(self, '_PMMail__text_body', value),
lambda self: setattr(self, '_PMMail__text_body', None),
'''
The email message body, in text format
'''
)
track_opens = property(
lambda self: self.__track_opens,
lambda self, value: setattr(self, '_PMMail__track_opens', value),
lambda self: setattr(self, '_PMMail__track_opens', None),
'''
Whether or not to track opens
NOTE: Requires html_body to be set to work
'''
)
custom_headers = property(
lambda self: self.__custom_headers,
_set_custom_headers,
lambda self: setattr(self, '_PMMail__custom_headers', {}),
'''
Custom headers in a standard dictionary.
NOTE: To change the reply to address, use the .reply_to
property instead of a custom header.
'''
)
attachments = property(
lambda self: self.__attachments,
_set_attachments,
lambda self: setattr(self, '_PMMail__attachments', []),
'''
Attachments, Base64 encoded, in a list.
'''
)
# multipart = property(
# lambda self: self.__multipart,
# lambda self, value: setattr(self, '_PMMail__multipart', value),
# 'The API Key for one of your servers on Postmark'
# )
metadata = property(
lambda self: self.__metadata,
_set_metadata,
lambda self: setattr(self, '_PMMail__metadata', {}),
'''
Custom metadata key/value pairs returned by webhooks.
'''
)
template_id = property(
lambda self: self.__template_id,
lambda self, value: setattr(self, '_PMMail__template_id', value),
lambda self: setattr(self, '_PMMail__template_id', {}),
)
template_model = property(
lambda self: self.__template_model,
lambda self, value: setattr(self, '_PMMail__template_model', value),
lambda self: setattr(self, '_PMMail__template_model', {}),
)
message_id = property(
lambda self: self.__message_id,
lambda self, value: setattr(self, '_PMMail__message_id', value),
lambda self: setattr(self, '_PMMail__message_id', None),
'''
The email message ID, a UUID string.
'''
)
#####################
#
# LEGACY SUPPORT
#
#####################
recipient = property(
lambda self: self.__to,
lambda self, value: setattr(self, '_PMMail__to', value),
lambda self: setattr(self, '_PMMail__to', None),
'''
The recipients, in either "<EMAIL>" or "<NAME> <<EMAIL>>" formats
import zipfile
from collections import defaultdict
from django.http import HttpResponse
from rdmo.core.exports import prettify_xml
from rdmo.core.renderers import BaseXMLRenderer
from rdmo.projects.exports import Export
class RadarExport(Export):
identifier_type_options = {
'identifier_type/doi': 'DOI',
'identifier_type/handle': 'HANDLE',
'identifier_type/other': 'OTHER'
}
language_options = {
'language/en': 'eng',
'language/de': 'deu'
}
name_type_options = {
'name_type/personal': 'Personal',
'name_type/organizational': 'Organizational'
}
name_identifier_scheme_options = {
'name_identifier_scheme/orcid': 'ORCID',
'name_identifier_scheme/insi': 'INSI',
'name_identifier_scheme/ror': 'ROR',
'name_identifier_scheme/grid': 'GRID'
}
contributor_type_options = {
'contributor_type/contact_persion': 'ContactPerson',
'contributor_type/data_collector': 'DataCollector',
'contributor_type/data_curator': 'DataCurator',
'contributor_type/data_manager': 'DataManager',
'contributor_type/distributor': 'Distributor',
'contributor_type/editor': 'Editor',
'contributor_type/hosting_institution': 'HostingInstitution',
'contributor_type/producer': 'Producer',
'contributor_type/project_leader': 'ProjectLeader',
'contributor_type/project_manager': 'ProjectManager',
'contributor_type/project_member': 'ProjectMember',
'contributor_type/registration_agency': 'RegistrationAgency',
'contributor_type/registration_authority': 'RegistrationAuthority',
'contributor_type/related_person': 'RelatedPerson',
'contributor_type/researcher': 'Researcher',
'contributor_type/research_group': 'ResearchGroup',
'contributor_type/rights_holder': 'RightsHolder',
'contributor_type/sponsor': 'Sponsor',
'contributor_type/supervisor': 'Supervisor',
'contributor_type/work_package_leader': 'WorkPackageLeader',
'contributor_type/other': 'Other'
}
resource_type_options = {
'resource_type_general/audiovisual': 'Audiovisual',
'resource_type_general/collection': 'Collection',
'resource_type_general/data_paper': 'DataPaper',
'resource_type_general/dataset': 'Dataset',
'resource_type_general/event': 'Event',
'resource_type_general/image': 'Image',
'resource_type_general/interactive_resource': 'InteractiveResource',
'resource_type_general/model': 'Model',
'resource_type_general/physical_object': 'PhysicalObject',
'resource_type_general/service': 'Service',
'resource_type_general/software': 'Software',
'resource_type_general/sound': 'Sound',
'resource_type_general/text': 'Text',
'resource_type_general/workflow': 'Workflow',
'resource_type_general/other': 'Other'
}
controlled_subject_area_options = {
'radar_controlled_subject_area/agriculture': 'Agriculture',
'radar_controlled_subject_area/architecture': 'Architecture',
'radar_controlled_subject_area/arts_and_media': 'Arts and Media',
'radar_controlled_subject_area/astrophysics_and_astronomy': 'Astrophysics and Astronomy',
'radar_controlled_subject_area/biochemistry': 'Biochemistry',
'radar_controlled_subject_area/biology': 'Biology',
'radar_controlled_subject_area/behavioural_sciences': 'Behavioural Sciences',
'radar_controlled_subject_area/chemistry': 'Chemistry',
'radar_controlled_subject_area/computer_science': 'Computer Science',
'radar_controlled_subject_area/economics': 'Economics',
'radar_controlled_subject_area/engineering': 'Engineering',
'radar_controlled_subject_area/environmental_science_and_ecology': 'Environmental Science and Ecology',
'radar_controlled_subject_area/ethnology': 'Ethnology',
'radar_controlled_subject_area/geological_science': 'Geological Science',
'radar_controlled_subject_area/geography': 'Geography',
'radar_controlled_subject_area/history': 'History',
'radar_controlled_subject_area/horticulture': 'Horticulture',
'radar_controlled_subject_area/information_technology': 'Information Technology',
'radar_controlled_subject_area/life_science': 'Life Science',
'radar_controlled_subject_area/linguistics': 'Linguistics',
'radar_controlled_subject_area/materials_science': 'Materials Science',
'radar_controlled_subject_area/mathematics': 'Mathematics',
'radar_controlled_subject_area/medicine': 'Medicine',
'radar_controlled_subject_area/philosophy': 'Philosophy',
'radar_controlled_subject_area/physics': 'Physics',
'radar_controlled_subject_area/psychology': 'Psychology',
'radar_controlled_subject_area/social_sciences': 'Social Sciences',
'radar_controlled_subject_area/software_technology': 'Software Technology',
'radar_controlled_subject_area/sports': 'Sports',
'radar_controlled_subject_area/theology': 'Theology',
'radar_controlled_subject_area/veterinary_medicine': 'Veterinary Medicine',
'radar_controlled_subject_area/other': 'Other'
}
data_source_options = {
'radar_data_source/instrument': 'Instrument',
'radar_data_source/media': 'Media',
'radar_data_source/observation': 'Observation',
'radar_data_source/trial': 'Trial',
'radar_data_source/organism': 'Organism',
'radar_data_source/tissue': 'Tissue',
'radar_data_source/other': 'Other'
}
software_type_options = {
'radar_software_type/resource_production': 'Resource Production',
'radar_software_type/resource_processing': 'Resource Processing',
'radar_software_type/resource_viewing': 'Resource Viewing',
'radar_software_type/other': 'Other'
}
controlled_rights_options = {
'dataset_license_types/71': 'CC BY 4.0 Attribution',
'dataset_license_types/74': 'CC BY-ND 4.0 Attribution-NoDerivs',
'dataset_license_types/75': 'CC BY-SA 4.0 Attribution-ShareAlike',
'dataset_license_types/73': 'CC BY-NC 4.0 Attribution-NonCommercial',
# '': 'CC BY-NC-SA 4.0 Attribution-NonCommercial-ShareAlike',
# '': 'CC BY-NC-ND 4.0 Attribution-NonCommercial-NoDerivs',
'dataset_license_types/cc0': 'CC0 1.0 Universal Public Domain Dedication',
# '': 'All rights reserved',
'dataset_license_types/233': 'Other'
}
relation_type_options = {
'relation_type/is_cited_by': 'IsCitedBy',
'relation_type/cites': 'Cites',
'relation_type/is_supplement_to': 'IsSupplementTo',
'relation_type/is_supplemented_by': 'IsSupplementedBy',
'relation_type/is_continued_by': 'IsContinuedBy',
'relation_type/continues': 'Continues',
'relation_type/describes': 'Describes',
'relation_type/is_described_by': 'IsDescribedBy',
'relation_type/has_metadata': 'HasMetadata',
'relation_type/is_metadata_for': 'IsMetadataFor',
'relation_type/has_version': 'HasVersion',
'relation_type/is_version_of': 'IsVersionOf',
'relation_type/is_new_version_of': 'IsNewVersionOf',
'relation_type/is_previous_version_of': 'IsPreviousVersionOf',
'relation_type/is_part_of': 'IsPartOf',
'relation_type/has_part': 'HasPart',
'relation_type/is_published_in': 'IsPublishedIn',
'relation_type/is_referenced_by': 'IsReferencedBy',
'relation_type/references': 'References',
'relation_type/is_documented_by': 'IsDocumentedBy',
'relation_type/documents': 'Documents',
'relation_type/is_compiled_by': 'IsCompiledBy',
'relation_type/Compiles': 'Compiles',
'relation_type/is_variant_form_of': 'IsVariantFormOf',
'relation_type/is_original_form_of': 'IsOriginalFormOf',
'relation_type/is_identical_to': 'IsIdenticalTo',
'relation_type/is_reviewed_by': 'IsReviewedBy',
'relation_type/reviews': 'Reviews',
'relation_type/is_derived_from': 'IsDerivedFrom',
'relation_type/is_source_of': 'IsSourceOf',
'relation_type/is_required_by': 'IsRequiredBy',
'relation_type/requires': 'Requires',
'relation_type/obsoletes': 'Obsoletes',
'relation_type/is_obsoleted_by': 'IsObsoletedBy'
}
class Renderer(BaseXMLRenderer):
scheme_uri = {
'INSI': 'http://www.isni.org/',
'ORCID': 'https://orcid.org',
'ROR': 'https://ror.org/',
'GRID': 'https://www.grid.ac/'
}
def render_document(self, xml, dataset):
xml.startElement('ns2:radarDataset', {
'xmlns': 'http://radar-service.eu/schemas/descriptive/radar/v09/radar-elements',
'xmlns:ns2': 'http://radar-service.eu/schemas/descriptive/radar/v09/radar-dataset'
})
# identifier
identifier = dataset.get('identifier')
if identifier:
self.render_text_element(xml, 'identifier', {
'identifierType': dataset.get('identifierType', 'OTHER')
}, identifier)
# creators
creators = dataset.get('creators')
if creators:
xml.startElement('creators', {})
for creator in creators:
xml.startElement('creator', {})
self.render_text_element(xml, 'creatorName', {
'nameType': creator.get('nameType')
}, creator.get('name'))
if creator.get('givenName'):
self.render_text_element(xml, 'givenName', {}, creator.get('givenName'))
if creator.get('familyName'):
self.render_text_element(xml, 'familyName', {}, creator.get('familyName'))
if creator.get('nameIdentifier'):
self.render_text_element(xml, 'nameIdentifier', {
'nameIdentifierScheme': creator.get('nameIdentifierScheme'),
'schemeURI': self.scheme_uri.get(creator.get('schemeURI')),
}, creator.get('nameIdentifier'))
for affiliation in creator.get('affiliations', []):
self.render_text_element(xml, 'affiliation', {
'affiliationIdentifier': affiliation.get('affiliationIdentifier'),
'affiliationIdentifierScheme': affiliation.get('affiliationIdentifierScheme')
}, affiliation.get('affiliation'))
xml.endElement('creator')
xml.endElement('creators')
# title
title = dataset.get('title')
if title:
self.render_text_element(xml, 'title', {}, title)
# publisher
publisher = dataset.get('publisher')
if publisher:
self.render_text_element(xml, 'publisher', {}, publisher)
# productionYear
production_year = dataset.get('productionYear')
if production_year:
self.render_text_element(xml, 'productionYear', {}, production_year)
# publicationYear
publication_year = dataset.get('publicationYear')
if publication_year:
self.render_text_element(xml, 'publicationYear', {}, publication_year)
# subjectArea
subject_areas = dataset.get('subjectAreas')
if subject_areas:
xml.startElement('subjectAreas', {})
for subject_area in subject_areas:
xml.startElement('subjectArea', {})
self.render_text_element(xml, 'controlledSubjectAreaName', {}, subject_area.get('controlledSubjectAreaName'))
if subject_area.get('additionalSubjectAreaName'):
self.render_text_element(xml, 'additionalSubjectAreaName', {}, subject_area.get('additionalSubjectAreaName'))
xml.endElement('subjectArea')
xml.endElement('subjectAreas')
# resource
resource = dataset.get('resource')
if resource:
self.render_text_element(xml, 'resource', {
'resourceType': dataset.get('resourceType')
}, resource)
# rights
rights_list = dataset.get('rights')
if rights_list:
xml.startElement('rights', {})
for rights in rights_list:
self.render_text_element(xml, 'controlledRights', {}, rights.get('controlledRights'))
additional_rights = rights.get('additionalRights')
if additional_rights:
self.render_text_element(xml, 'additionalRights', {}, additional_rights)
xml.endElement('rights')
# rightsHolders
rights_holders = dataset.get('rightsHolders')
if rights_holders:
xml.startElement('rightsHolders', {})
for rights_holder in rights_holders:
self.render_text_element(xml, 'rightsHolder', {}, rights_holder)
xml.endElement('rightsHolders')
# additionalTitles
additional_titles = dataset.get('additionalTitles')
if additional_titles:
xml.startElement('additionalTitles', {})
for additional_title in additional_titles:
self.render_text_element(xml, 'additionalTitle', {
'additionalTitleType': additional_title['additionalTitleType']
}, additional_title['additionalTitle'])
xml.endElement('additionalTitles')
# descriptions
descriptions = dataset.get('descriptions')
if descriptions:
xml.startElement('descriptions', {})
for description in descriptions:
self.render_text_element(xml, 'description', {
'descriptionType': description.get('descriptionType', 'Abstract')
}, description.get('description'))
xml.endElement('descriptions')
# keywords
keywords = dataset.get('keywords')
if keywords:
xml.startElement('keywords', {})
for keyword in keywords:
self.render_text_element(xml, 'keyword', {}, keyword)
xml.endElement('keywords')
# contributors
contributors = dataset.get('contributors')
if contributors:
xml.startElement('contributors', {})
for contributor in dataset.get('contributors', []):
xml.startElement('contributor', {})
self.render_text_element(xml, 'contributorName', {
'nameType': contributor.get('nameType')
}, contributor.get('name'))
if contributor.get('givenName'):
self.render_text_element(xml, 'givenName', {}, contributor.get('givenName'))
if contributor.get('familyName'):
self.render_text_element(xml, 'familyName', {}, contributor.get('familyName'))
if contributor.get('nameIdentifier'):
self.render_text_element(xml, 'nameIdentifier', {
'nameIdentifierScheme': contributor.get('nameIdentifierScheme'),
'schemeURI': self.scheme_uri.get(contributor.get('schemeURI')),
}, contributor.get('nameIdentifier'))
for affiliation in contributor.get('affiliations', []):
self.render_text_element(xml, 'affiliation', {
'affiliationIdentifier': affiliation.get('affiliationIdentifier'),
'affiliationIdentifierScheme': affiliation.get('affiliationIdentifierScheme')
}, affiliation.get('affiliation'))
xml.endElement('contributor')
xml.endElement('contributors')
# language
language = dataset.get('language')
if language:
self.render_text_element(xml, 'language', {}, language)
# resource_type
resource_type = dataset.get('resourceType')
if resource_type:
self.render_text_element(xml, 'resourceType', {
'resourceTypeGeneral': dataset.get('resourceTypeGeneral')
}, resource_type)
# alternate_identifiers
alternate_identifiers = dataset.get('alternateIdentifiers')
if alternate_identifiers:
xml.startElement('alternateIdentifiers', {})
for alternate_identifier in alternate_identifiers:
self.render_text_element(xml, 'alternateIdentifier', {
'alternateIdentifierType': alternate_identifier.get('alternateIdentifierType')
}, alternate_identifier.get('alternateIdentifier'))
xml.endElement('alternateIdentifiers')
# related_identifiers
related_identifiers = dataset.get('relatedIdentifiers')
if related_identifiers:
xml.startElement('relatedIdentifiers', {})
for related_identifier in related_identifiers:
self.render_text_element(xml, 'relatedIdentifier', {
'relatedIdentifierType': related_identifier.get('relatedIdentifierType'),
'relationType': related_identifier.get('relationType')
}, related_identifier.get('relatedIdentifier'))
xml.endElement('relatedIdentifiers')
# dataSources
data_sources = dataset.get('dataSources')
if data_sources:
xml.startElement('dataSources', {})
for data_source in data_sources:
self.render_text_element(xml, 'dataSource', {
'dataSourceDetail': data_source.get('dataSourceDetail')
}, data_source.get('dataSource'))
xml.endElement('dataSources')
# software
software = dataset.get('software')
if software:
xml.startElement('software', {})
for software_type in software:
xml.startElement('softwareType', {
'type': software_type.get('type')
})
self.render_text_element(xml, 'softwareName', {
'softwareVersion': software_type.get('softwareVersion')
}, software_type.get('softwareName'))
if 'alternativeSoftwareName' in software_type:
self.render_text_element(xml, 'alternativeSoftwareName', {
'alternativeSoftwareVersion': software_type.get('alternativeSoftwareVersion')
}, software_type.get('alternativeSoftwareName'))
xml.endElement('softwareType')
xml.endElement('software')
# processing
processing_list = dataset.get('dataProcessing')
if processing_list:
xml.startElement('processing', {})
for processing in processing_list:
self.render_text_element(xml, 'dataProcessing', {}, processing)
xml.endElement('processing')
# relatedInformations
related_informations = dataset.get('relatedInformations')
if related_informations:
xml.startElement('relatedInformations', {})
for related_information in related_informations:
self.render_text_element(xml, 'relatedInformation', {
'relatedInformationType': related_information.get('relatedInformationType')
}, related_information.get('relatedInformation'))
xml.endElement('relatedInformations')
# funding_references
funding_references = dataset.get('fundingReferences')
if funding_references:
xml.startElement('fundingReferences', {})
for funding_reference in funding_references:
xml.startElement('fundingReference', {})
self.render_text_element(xml, 'funderName', {}, funding_reference.get('funderName'))
self.render_text_element(xml, 'funderIdentifier', {
'schemeURI': self.scheme_uri.get(funding_reference.get('funderIdentifierType')),
'funderIdentifierType': funding_reference.get('funderIdentifierType')
}, funding_reference.get('funderIdentifier'))
if funding_reference.get('awardNumber'):
self.render_text_element(xml, 'awardNumber', {
'awardURI': funding_reference.get('awardURI')
}, funding_reference.get('awardNumber'))
if funding_reference.get('awardTitle'):
self.render_text_element(xml, 'awardTitle', {}, funding_reference.get('awardTitle'))
xml.endElement('fundingReference')
xml.endElement('fundingReferences')
xml.endElement('ns2:radarDataset')
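# --- Illustrative sketch of the input to render_document (not part of the exporter) ---
# A hedged, minimal example of the dataset dictionary this renderer consumes;
# the keys mirror the lookups above and the values are placeholders. render()
# is inherited from BaseXMLRenderer and dispatches to render_document.
#
#   dataset = {
#       'identifier': '10.1234/example',
#       'identifierType': 'DOI',
#       'title': 'Example dataset',
#       'publisher': 'Example University',
#       'publicationYear': '2021',
#   }
#   xmldata = RadarExport.Renderer().render(dataset)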
def render(self):
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = 'filename="%s.zip"' % self.project.title
zip_file = zipfile.ZipFile(response, 'w')
for dataset in self.get_datasets():
xmldata = self.Renderer().render(dataset)
zip_file.writestr(dataset.get('file_name'), prettify_xml(xmldata))
return response
def get_datasets(self):
datasets = []
for rdmo_dataset in self.get_set('project/dataset/id'):
set_index = rdmo_dataset.set_index
dataset = defaultdict(list)
# file_name
dataset['file_name'] = '{}.xml'.format(
self.get_text('project/dataset/identifier', set_index=set_index) or
self.get_text('project/dataset/id', set_index=set_index) or
str(set_index + 1)
)
# identifier
identifier = self.get_text('project/dataset/identifier', set_index=set_index)
if identifier:
dataset['identifier'] = identifier
dataset['identifierType'] = \
self.get_option(self.identifier_type_options, 'project/dataset/identifier_type', set_index=set_index) or \
self.get_option(self.identifier_type_options, 'project/dataset/pids/system', set_index=set_index) or \
'OTHER'
else:
dataset['identifier'] = self.get_text('project/dataset/id', set_index=set_index)
dataset['identifierType'] = 'OTHER'
# creators
for creator_set in self.get_set('project/dataset/creator/name', set_prefix=str(set_index)):
creator = self.get_name('project/dataset/creator',
set_prefix=creator_set.set_prefix, set_index=creator_set.set_index)
if creator:
dataset['creators'].append(creator)
# title
dataset['title'] = \
self.get_text('project/dataset/title', set_index=set_index) or \
self.get_text('project/dataset/id', set_index=set_index) or \
'Dataset #{}'.format(set_index + 1)
# publisher
publisher = \
self.get_text('project/dataset/publisher', set_index=set_index) or \
self.get_text('project/dataset/preservation/repository', set_index=set_index)
if publisher:
dataset['publisher'] = publisher
# productionYear
dataset['productionYear'] = \
self.get_year('project/dataset/created', set_index=set_index) or \
self.get_year('project/dataset/data_publication_date', set_index=set_index)
# publicationYear
dataset['publicationYear'] = \
self.get_year('project/dataset/issued', set_index=set_index) or \
self.get_year('project/dataset/data_publication_date', set_index=set_index)
# subjectArea
subject_areas = \
self.get_values('project/dataset/subject', set_index=set_index) or \
self.get_values('project/research_field/title', set_index=set_index)
if subject_areas:
dataset['subjectAreas'] = []
for subject_area in subject_areas:
if subject_area.is_true:
if subject_area.option:
controlled_subject_area_name = self.controlled_subject_area_options.get(subject_area.option.path, 'Other')
else:
controlled_subject_area_name = 'Other'
if controlled_subject_area_name == 'Other':
dataset['subjectAreas'].append({
'controlledSubjectAreaName': controlled_subject_area_name,
'additionalSubjectAreaName': subject_area.value
})
else:
dataset['subjectAreas'].append({
'controlledSubjectAreaName': controlled_subject_area_name
})
# resource
resource_type = self.get_text('project/dataset/resource_type', set_index=set_index)
if resource_type:
dataset['resourceType'] = resource_type
dataset['resourceTypeGeneral'] = \
self.get_option(self.resource_type_options, 'project/dataset/resource_type_general', set_index=set_index)
# alternate_identifiers
for alternate_identifier_set in self.get_set('project/dataset/alternate_identifier/identifier', set_prefix=str(set_index)):
dataset['alternateIdentifiers'].append({
'alternateIdentifier': self.get_text('project/dataset/alternate_identifier/identifier',
set_prefix=alternate_identifier_set.set_prefix,
set_index=alternate_identifier_set.set_index),
'alternateIdentifierType': self.get_option(self.identifier_type_options,
'project/dataset/alternate_identifier/identifier_type',
set_prefix=alternate_identifier_set.set_prefix,
set_index=alternate_identifier_set.set_index)
})
# related_identifiers
for related_identifier_set in self.get_set('project/dataset/related_identifier/identifier', set_prefix=str(set_index)):
dataset['relatedIdentifiers'].append({
'relatedIdentifier': self.get_text('project/dataset/related_identifier/identifier',
set_prefix=related_identifier_set.set_prefix,
set_index=related_identifier_set.set_index),
'relatedIdentifierType': self.get_option(self.identifier_type_options,
'project/dataset/related_identifier/identifier_type',
set_prefix=related_identifier_set.set_prefix,
set_index=related_identifier_set.set_index),
'relationType': self.get_option(self.relation_type_options,
'project/dataset/related_identifier/relation_type',
set_prefix=related_identifier_set.set_prefix,
set_index=related_identifier_set.set_index)
})
# rights
rights_list = self.get_values('project/dataset/sharing/conditions', set_index=set_index)
if rights_list:
dataset['rights'] = []
for rights in rights_list:
if rights.option:
controlled_rights = self.controlled_rights_options.get(rights.option.path, 'Other')
else:
controlled_rights = 'Other'
dataset['rights'].append({
'controlledRights': controlled_rights,
'additionalRights': rights.value if controlled_rights == 'Other' else None
})
# rights holders
rights_holders = self.get_list('project/dataset/sharing/rights_holder', set_index=set_index)
if rights_holders:
dataset['rightsHolders'] = rights_holders
# description
description = self.get_text('project/dataset/description', set_index=set_index)
if description:
dataset['descriptions'] = [{
'description': description,
'descriptionType': 'Abstract'
}]
# keywords
keywords = self.get_list('project/research_question/keywords')
if keywords:
dataset['keywords'] = keywords
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectErrorList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
kwargs['path_list'] = \
path_list
return self.delete_objects_endpoint.call_with_http_info(**kwargs)
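# --- Illustrative usage sketch (not part of the generated client) ---
# A hedged example of calling delete_objects(); `api` stands for an instance of
# this API class and `path_list` for the request model this endpoint expects
# (its exact type is not shown in this excerpt). The call returns an
# ObjectErrorList describing any per-object failures.
#
#   errors = api.delete_objects('my-repo', 'main', path_list)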
def get_object(
self,
repository,
ref,
path,
**kwargs
):
"""get object content # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_object(repository, ref, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
path (str): relative to the ref
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
kwargs['path'] = \
path
return self.get_object_endpoint.call_with_http_info(**kwargs)
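# --- Illustrative usage sketch (not part of the generated client) ---
# A hedged example of fetching an object's content synchronously; `api` stands
# for an instance of this API class. Passing _preload_content=False (documented
# above) returns the raw urllib3 response instead of decoded content.
#
#   data = api.get_object('my-repo', 'main', 'path/to/file.csv')
#   raw = api.get_object('my-repo', 'main', 'path/to/file.csv', _preload_content=False)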
def get_underlying_properties(
self,
repository,
ref,
path,
**kwargs
):
"""get object properties on underlying storage # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_underlying_properties(repository, ref, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
path (str): relative to the ref
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UnderlyingObjectProperties
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
kwargs['path'] = \
path
return self.get_underlying_properties_endpoint.call_with_http_info(**kwargs)
def list_objects(
self,
repository,
ref,
**kwargs
):
"""list objects under a given prefix # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_objects(repository, ref, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
Keyword Args:
user_metadata (bool): [optional] if omitted the server will use the default value of True
after (str): return items after this value. [optional]
amount (int): how many items to return. [optional] if omitted the server will use the default value of 100
delimiter (str): delimiter used to group common prefixes by. [optional]
prefix (str): return items prefixed with this value. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectStatsList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
return self.list_objects_endpoint.call_with_http_info(**kwargs)
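# --- Illustrative usage sketch (added commentary; not part of the generated
# client). It pages through list_objects() using the documented `prefix`,
# `amount` and `after` keyword arguments. Here `objects_api` stands for an
# instance of this API class constructed elsewhere, and the response attribute
# names (`results`, `pagination.has_more`, `pagination.next_offset`) are
# assumptions about the generated model classes, not confirmed by this excerpt:
#
#     after = ""
#     while True:
#         page = objects_api.list_objects(
#             "my-repo", "main", prefix="data/", amount=100, after=after)
#         for obj in page.results:
#             print(obj.path)
#         if not page.pagination.has_more:
#             break
#         after = page.pagination.next_offset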
def stage_object(
self,
repository,
branch,
path,
object_stage_creation,
**kwargs
):
"""stage an object's metadata for the given branch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stage_object(repository, branch, path, object_stage_creation, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
path (str): relative to the branch
object_stage_creation (ObjectStageCreation):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectStats
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
kwargs['path'] = \
path
kwargs['object_stage_creation'] = \
object_stage_creation
return self.stage_object_endpoint.call_with_http_info(**kwargs)
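# --- Illustrative usage sketch (added commentary; not part of the generated
# client). stage_object() records metadata for data that already resides on the
# underlying storage rather than uploading content through lakeFS. The
# ObjectStageCreation field names shown (physical_address, checksum, size_bytes)
# are assumptions about the generated model, not confirmed by this excerpt:
#
#     staging = ObjectStageCreation(
#         physical_address="s3://bucket/path/to/object",
#         checksum="deadbeef",
#         size_bytes=1024,
#     )
#     objects_api.stage_object("my-repo", "main", "data/file.parquet", staging)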
def stat_object(
self,
repository,
ref,
path,
**kwargs
):
"""get object metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stat_object(repository, ref, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
path (str): relative to the branch
Keyword Args:
user_metadata (bool): [optional] if omitted the server will use the default value of True
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import cv2
import numpy as np
import torch
from multipledispatch import dispatch
from torch import nn
from torch import Tensor
from torchvision.transforms.functional import adjust_brightness
from torchvision.transforms.functional import adjust_contrast
from torchvision.transforms.functional import adjust_gamma
from torchvision.transforms.functional import adjust_hue
from torchvision.transforms.functional import adjust_saturation
from torchvision.transforms.functional import adjust_sharpness
from torchvision.transforms.functional import autocontrast
from torchvision.transforms.functional import equalize
from torchvision.transforms.functional import invert
from torchvision.transforms.functional import posterize
from torchvision.transforms.functional import solarize
from torchvision.transforms.functional_tensor import _assert_channels
from torchvision.transforms.functional_tensor import _assert_image_tensor
from torchvision.transforms.functional_tensor import _hsv2rgb
from torchvision.transforms.functional_tensor import _rgb2hsv
from onevision.cv.core.channels import get_num_channels
from onevision.factory import TRANSFORMS
from onevision.type import TensorOrArray
__all__ = [
"adjust_brightness",
"adjust_contrast",
#"adjust_gamma",
"adjust_hsv",
"adjust_hue",
"adjust_saturation",
"adjust_sharpness",
"autocontrast",
"equalize",
"invert",
"posterize",
"solarize",
"AdjustBrightness",
"AdjustContrast",
"AdjustGamma",
"AdjustHsv",
"AdjustHue",
"AdjustSaturation",
"AdjustSharpness",
"AutoContrast",
"Equalize",
"Invert",
"Posterize",
"Solarize",
]
# MARK: - Functional
@dispatch(Tensor, h_factor=float, s_factor=float, v_factor=float)
def adjust_hsv(
image : Tensor,
h_factor: float = 0.5,
s_factor: float = 0.5,
v_factor: float = 0.5,
) -> Tensor:
if not isinstance(image, Tensor):
raise TypeError(f"`image` must be a `Tensor`. But got: {type(image)}")
_assert_image_tensor(image)
_assert_channels(image, [1, 3])
if get_num_channels(image) == 1: # Match PIL behaviour
return image
orig_dtype = image.dtype
if image.dtype == torch.uint8:
image = image.to(dtype=torch.float32) / 255.0
image = _rgb2hsv(image)
h, s, v = image.unbind(dim=-3)
h = (h * h_factor).clamp(0, 1)
s = (s * s_factor).clamp(0, 1)
v = (v * v_factor).clamp(0, 1)
image = torch.stack((h, s, v), dim=-3)
img_hue_adj = _hsv2rgb(image)
if orig_dtype == torch.uint8:
img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)
return img_hue_adj
@dispatch(np.ndarray, h_factor=float, s_factor=float, v_factor=float)
def adjust_hsv(
image : np.ndarray,
h_factor: float = 0.5,
s_factor: float = 0.5,
v_factor: float = 0.5,
) -> np.ndarray:
if not isinstance(image, np.ndarray):
raise TypeError(f"`image` must be a `np.ndarray`. But got: {type(image)}.")
# Random gains
r = np.random.uniform(-1, 1, 3) * [h_factor, s_factor, v_factor] + 1
hue, sat, val = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))
dtype = image.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge(
(cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))
).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=image) # No return needed
return image
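# Illustrative usage sketch (added commentary; not part of the original module).
# adjust_hsv() is dispatched on the image type: Tensor inputs go through the
# torchvision RGB<->HSV helpers and have their channels scaled by the factors,
# while np.ndarray inputs are treated as BGR images and randomly jittered via
# OpenCV LUTs. Because @dispatch registers only the image type, the factors are
# passed as keywords here:
#
#     import torch
#     chw = torch.rand(3, 224, 224)                        # float CHW in [0, 1]
#     chw_adj = adjust_hsv(chw, h_factor=0.5, s_factor=0.5, v_factor=0.5)
#
#     bgr = cv2.imread("image.jpg")                        # hypothetical input
#     bgr_adj = adjust_hsv(bgr, h_factor=0.015, s_factor=0.7, v_factor=0.4)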
# MARK: - Modules
@TRANSFORMS.register(name="adjust_brightness")
class AdjustBrightness(nn.Module):
"""Adjust brightness of an image.
Args:
brightness_factor (float):
How much to adjust the brightness. Can be any non-negative number.
0 gives a black image, 1 gives the original image while 2 increases
the brightness by a factor of 2.
Example:
>>> x = torch.ones(1, 1, 3, 3)
>>> AdjustBrightness(1.)(x)
image([[[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y = torch.ones(2)
>>> AdjustBrightness(y)(x).shape
torch.Size([2, 5, 3, 3])
"""
# MARK: Magic Functions
def __init__(self, brightness_factor: float):
super().__init__()
self.brightness_factor = brightness_factor
# MARK: Forward Pass
def forward(self, image: Tensor) -> Tensor:
"""
Args:
image (Tensor[..., 1 or 3, H, W]):
Image to be adjusted. If img is Tensor, it is expected to
be in [..., 1 or 3, H, W] format, where ... means it can have
an arbitrary number of leading dimensions.
Returns:
(Tensor[..., 1 or 3, H, W]):
Brightness adjusted image.
"""
return adjust_brightness(image, self.brightness_factor)
@TRANSFORMS.register(name="adjust_contrast")
class AdjustContrast(nn.Module):
"""Adjust contrast of an image.
Args:
contrast_factor (float):
How much to adjust the contrast. Can be any non-negative number.
0 gives a solid gray image, 1 gives the original image while 2
increases the contrast by a factor of 2.
Example:
>>> x = torch.ones(1, 1, 3, 3)
>>> AdjustContrast(0.5)(x)
image([[[[0.5000, 0.5000, 0.5000],
[0.5000, 0.5000, 0.5000],
[0.5000, 0.5000, 0.5000]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y = torch.ones(2)
>>> AdjustContrast(y)(x).shape
torch.Size([2, 5, 3, 3])
"""
# MARK: Magic Functions
def __init__(self, contrast_factor: float):
super().__init__()
self.contrast_factor = contrast_factor
# MARK: Forward Pass
def forward(self, image: Tensor) -> Tensor:
"""
Args:
image (Tensor[..., 1 or 3, H, W]):
Image to be adjusted. If img is Tensor, it is expected to
be in [..., 1 or 3, H, W] format, where ... means it can have
an arbitrary number of leading dimensions.
Returns:
(Tensor[..., 1 or 3, H, W]):
Contrast adjusted image.
"""
return adjust_contrast(image, self.contrast_factor)
@TRANSFORMS.register(name="adjust_gamma")
class AdjustGamma(nn.Module):
"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
gamma (float, Tensor):
Non-negative real number, same as :math:`\gamma` in the equation.
gamma larger than 1 make the shadows darker, while gamma smaller
than 1 make dark regions lighter.
gain (float, Tensor):
Constant multiplier.
Example:
>>> x = torch.ones(1, 1, 3, 3)
>>> AdjustGamma(1.0, 2.0)(x)
image([[[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y1 = torch.ones(2) * 1.0
>>> y2 = torch.ones(2) * 2.0
>>> AdjustGamma(y1, y2)(x).shape
torch.Size([2, 5, 3, 3])
"""
# MARK: Magic Functions
def __init__(self, gamma: float, gain: float = 1.0):
super().__init__()
self.gamma = gamma
self.gain = gain
# MARK: Forward Pass
def forward(self, image: Tensor) -> Tensor:
"""
Args:
image (Tensor[..., 1 or 3, H, W]):
Image to be adjusted. If img is Tensor, it is expected to
be in [..., 1 or 3, H, W] format, where ... means it can have
an arbitrary number of leading dimensions.
Returns:
(Tensor[..., 1 or 3, H, W]):
Gamma correction adjusted image.
"""
return adjust_gamma(image, self.gamma, self.gain)
@TRANSFORMS.register(name="adjust_hsv")
class AdjustHsv(nn.Module):
"""Adjust HSV of an image.
Args:
h_factor (float):
How much to shift the hue channel.
s_factor (float):
How much to shift the saturation channel.
v_factor (float):
How much to shift the value channel.
"""
# MARK: Magic Functions
def __init__(
self,
h_factor: float = 0.5,
s_factor: float = 0.5,
v_factor: float = 0.5,
):
super().__init__()
self.h_factor = h_factor
self.s_factor = s_factor
self.v_factor = v_factor
# MARK: Forward Pass
def forward(self, image: TensorOrArray) -> TensorOrArray:
"""
Args:
image (TensorOrArray[..., 1 or 3, H, W]):
If image is Tensor, it is expected to be in
[..., 1 or 3, H, W] format, where ... means it can have an
arbitrary number of leading dimensions. If img is PIL Image
mode "1", "I", "F" and modes with transparency (alpha channel)
are not supported.
Returns:
(TensorOrArray[..., 1 or 3, H, W]):
Hue adjusted image.
"""
return adjust_hsv(image, self.h_factor, self.s_factor, self.v_factor)
@TRANSFORMS.register(name="adjust_hue")
class AdjustHue(nn.Module):
"""Adjust hue of an image.
Image hue is adjusted by converting the image to HSV and cyclically
shifting the intensities in the hue channel (H). Image is then
converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
hue_factor (float, Tensor):
How much to shift the hue channel. Should be in [-0.5, 0.5].
0.5 and -0.5 give complete reversal of hue channel in HSV space in
positive and negative direction respectively. 0 means no shift.
Therefore, both -0.5 and 0.5 will give an image with complementary
colors while 0 gives the original image.
Example:
>>> x = torch.ones(1, 3, 3, 3)
>>> AdjustHue(3.141516)(x)
image([[[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
<BLANKLINE>
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
<BLANKLINE>
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]])
>>> x = torch.ones(2, 3, 3, 3)
>>> y = torch.ones(2) * 3.141516
>>> AdjustHue(y)(x).shape
torch.Size([2, 3, 3, 3])
"""
# MARK: Magic Functions
def __init__(self, hue_factor: float):
super().__init__()
self.hue_factor = hue_factor
# MARK: Forward Pass
def forward(self, image: Tensor) -> Tensor:
"""
Args:
image (Tensor[..., 1 or 3, H, W]):
If image is Tensor, it is expected to be in
[..., 1 or 3, H, W] format, where ... means it can have an
arbitrary number of leading dimensions. If img is PIL Image
mode "1", "I", "F" and modes with transparency (alpha channel)
are not supported.
Returns:
(Tensor[..., 1 or 3, H, W]):
Hue adjusted image.
"""
return adjust_hue(image, self.hue_factor)
@TRANSFORMS.register(name="adjust_saturation")
class AdjustSaturation(nn.Module):
function
cFuncNowUnc = interpolator(mLvl, pLvl, cLvl)
# Combine the constrained and unconstrained functions into the true consumption function
cFuncNow = LowerEnvelope2D(cFuncNowUnc, self.cFuncNowCnst)
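# Added commentary (not in the original source): LowerEnvelope2D takes the
# pointwise minimum of the two functions, so consumption equals the
# unconstrained solution wherever it is feasible and the constrained function
# (spend down to the borrowing limit) wherever the constraint binds.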
# Make the marginal value function
vPfuncNow = self.makevPfunc(cFuncNow)
# Pack up the solution and return it
solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=0.0)
return solution_now
def makevPfunc(self, cFunc):
'''
Constructs the marginal value function for this period.
Parameters
----------
cFunc : function
Consumption function this period, defined over market resources and
persistent income level.
Returns
-------
vPfunc : function
Marginal value (of market resources) function for this period.
'''
vPfunc = MargValueFunc2D(cFunc, self.CRRA)
return vPfunc
def makevFunc(self, solution):
'''
Creates the value function for this period, defined over market resources
m and persistent income p. self must have the attribute EndOfPrdvFunc in
order to execute.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
vFuncNow : ValueFunc
A representation of the value function for this period, defined over
market resources m and persistent income p: v = vFuncNow(m,p).
'''
mSize = self.aXtraGrid.size
pSize = self.pLvlGrid.size
# Compute expected value and marginal value on a grid of market resources
pLvl_temp = np.tile(self.pLvlGrid, (mSize, 1)) # Tile pLvl across m values
mLvl_temp = np.tile(self.mLvlMinNow(self.pLvlGrid), (mSize, 1)) +\
np.tile(np.reshape(self.aXtraGrid, (mSize, 1)), (1, pSize))*pLvl_temp
cLvlNow = solution.cFunc(mLvl_temp, pLvl_temp)
aLvlNow = mLvl_temp - cLvlNow
vNow = self.u(cLvlNow) + self.EndOfPrdvFunc(aLvlNow, pLvl_temp)
vPnow = self.uP(cLvlNow)
# Calculate pseudo-inverse value and its first derivative (wrt mLvl)
vNvrs = self.uinv(vNow) # value transformed through inverse utility
vNvrsP = vPnow*self.uinvP(vNow)
# Add data at the lower bound of m
mLvl_temp = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid), (1, pSize)), mLvl_temp), axis=0)
vNvrs = np.concatenate((np.zeros((1, pSize)), vNvrs), axis=0)
vNvrsP = np.concatenate((np.reshape(vNvrsP[0, :], (1, vNvrsP.shape[1])), vNvrsP), axis=0)
# Add data at the lower bound of p
MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA))
m_temp = np.reshape(mLvl_temp[:, 0], (mSize+1, 1))
mLvl_temp = np.concatenate((m_temp, mLvl_temp), axis=1)
vNvrs = np.concatenate((MPCminNvrs*m_temp, vNvrs), axis=1)
vNvrsP = np.concatenate((MPCminNvrs*np.ones((mSize+1, 1)), vNvrsP), axis=1)
# Construct the pseudo-inverse value function
vNvrsFunc_list = []
for j in range(pSize+1):
pLvl = np.insert(self.pLvlGrid, 0, 0.0)[j]
vNvrsFunc_list.append(CubicInterp(mLvl_temp[:, j]-self.mLvlMinNow(pLvl),
vNvrs[:, j], vNvrsP[:, j], MPCminNvrs*self.hLvlNow(pLvl), MPCminNvrs))
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_list,
np.insert(self.pLvlGrid, 0, 0.0)) # Value function "shifted"
vNvrsFuncNow = VariableLowerBoundFunc2D(vNvrsFuncBase, self.mLvlMinNow)
# "Re-curve" the pseudo-inverse value function into the value function
vFuncNow = ValueFunc2D(vNvrsFuncNow, self.CRRA)
return vFuncNow
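# Added commentary (not in the original source): interpolation is done on the
# pseudo-inverse value vNvrs = u^{-1}(v) because, for CRRA utility, vNvrs is
# close to linear in market resources and therefore much better behaved under
# interpolation than v itself, which diverges toward -inf near the borrowing
# constraint; ValueFunc2D then re-applies the utility transform so that
# vFuncNow(m, p) returns v rather than vNvrs.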
def makeBasicSolution(self, EndOfPrdvP, aLvl, pLvl, interpolator):
'''
Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvl : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
pLvl : np.array
Array of persistent income levels that yield the marginal values
in EndOfPrdvP (corresponding pointwise to aLvl).
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
cLvl, mLvl = self.getPointsForInterpolation(EndOfPrdvP, aLvl)
pLvl_temp = np.concatenate((np.reshape(self.pLvlGrid, (self.pLvlGrid.size, 1)), pLvl), axis=-1)
pLvl_temp = np.concatenate((np.zeros((1, mLvl.shape[1])), pLvl_temp))
solution_now = self.usePointsForInterpolation(cLvl, mLvl, pLvl_temp, interpolator)
return solution_now
def makeLinearcFunc(self, mLvl, pLvl, cLvl):
'''
Makes a quasi-bilinear interpolation to represent the (unconstrained)
consumption function.
Parameters
----------
mLvl : np.array
Market resource points for interpolation.
pLvl : np.array
Persistent income level points for interpolation.
cLvl : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : LinearInterp
The unconstrained consumption function for this period.
'''
cFunc_by_pLvl_list = [] # list of consumption functions for each pLvl
for j in range(pLvl.shape[0]):
pLvl_j = pLvl[j, 0]
m_temp = mLvl[j, :] - self.BoroCnstNat(pLvl_j)
c_temp = cLvl[j, :] # Make a linear consumption function for this pLvl
if pLvl_j > 0:
cFunc_by_pLvl_list.append(LinearInterp(m_temp, c_temp, lower_extrap=True,
slope_limit=self.MPCminNow,
intercept_limit=self.MPCminNow*self.hLvlNow(pLvl_j)))
else:
cFunc_by_pLvl_list.append(LinearInterp(m_temp, c_temp, lower_extrap=True))
pLvl_list = pLvl[:, 0]
cFuncUncBase = LinearInterpOnInterp1D(cFunc_by_pLvl_list, pLvl_list) # Combine all linear cFuncs
cFuncUnc = VariableLowerBoundFunc2D(
cFuncUncBase, self.BoroCnstNat) # Re-adjust for natural borrowing constraint (as lower bound)
return cFuncUnc
def makeCubiccFunc(self, mLvl, pLvl, cLvl):
'''
Makes a quasi-cubic spline interpolation of the unconstrained consumption
function for this period. Function is cubic splines with respect to mLvl,
but linear in pLvl.
Parameters
----------
mLvl : np.array
Market resource points for interpolation.
pLvl : np.array
Persistent income level points for interpolation.
cLvl : np.array
Consumption points for interpolation.
Returns
-------
cFuncUnc : CubicInterp
The unconstrained consumption function for this period.
'''
# Calculate the MPC at each gridpoint
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(
self.vPPfuncNext(self.mLvlNext, self.pLvlNext)*self.ShkPrbs_temp, axis=0)
dcda = EndOfPrdvPP/self.uPP(np.array(cLvl[1:, 1:]))
MPC = dcda/(dcda+1.)
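# Added commentary (not in the original source): the MPC formula follows from
# differentiating the first order condition u'(c(m)) = v_a'(m - c(m)) with
# respect to m, giving u''(c) c'(m) = v_a''(a) (1 - c'(m)); with
# dcda = v_a''(a) / u''(c) this solves to c'(m) = dcda / (dcda + 1).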
MPC = np.concatenate((np.reshape(MPC[:, 0],
(MPC.shape[0], 1)), MPC), axis=1)
# Stick an extra MPC value at bottom; MPCmax doesn't work
MPC = np.concatenate((self.MPCminNow*np.ones((1, self.aXtraGrid.size+1)), MPC), axis=0)
# Make cubic consumption function with respect to mLvl for each persistent income level
cFunc_by_pLvl_list = [] # list of consumption functions for each pLvl
for j in range(pLvl.shape[0]):
pLvl_j = pLvl[j, 0]
m_temp = mLvl[j, :] - self.BoroCnstNat(pLvl_j)
c_temp = cLvl[j, :] # Make a cubic consumption function for this pLvl
MPC_temp = MPC[j, :]
if pLvl_j > 0:
cFunc_by_pLvl_list.append(CubicInterp(
m_temp, c_temp, MPC_temp, lower_extrap=True,
slope_limit=self.MPCminNow, intercept_limit=self.MPCminNow*self.hLvlNow(pLvl_j)))
else: # When pLvl=0, cFunc is linear
cFunc_by_pLvl_list.append(LinearInterp(m_temp, c_temp, lower_extrap=True))
pLvl_list = pLvl[:, 0]
cFuncUncBase = LinearInterpOnInterp1D(cFunc_by_pLvl_list, pLvl_list)  # Combine the per-pLvl cFuncs
cFuncUnc = VariableLowerBoundFunc2D(cFuncUncBase, self.BoroCnstNat)
# Re-adjust for lower bound of natural borrowing constraint
return cFuncUnc
def addMPCandHumanWealth(self, solution):
'''
Take a solution and add human wealth and the bounding MPCs to it.
Parameters
----------
solution : ConsumerSolution
The solution to this period's consumption-saving problem.
Returns
-------
solution : ConsumerSolution
The solution to this period's consumption-saving problem, but now
with human wealth and the bounding MPCs.
'''
solution.hNrm = 0.0 # Can't have None or setAndUpdateValues breaks, should fix
solution.hLvl = self.hLvlNow
solution.mLvlMin = self.mLvlMinNow
solution.MPCmin = self.MPCminNow
solution.MPCmax = 0.0 # MPCmax is actually a function in this model
return solution
def addvPPfunc(self, solution):
'''
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
'''
vPPfuncNow = MargMargValueFunc2D(solution.cFunc, self.CRRA)
solution.vPPfunc = vPPfuncNow
return solution
def solve(self):
'''
Solves a one period consumption saving problem with risky income, with
persistent income explicitly tracked as a state variable.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and persistent income), a
marginal value function, bounding MPCs, and human wealth as a func-
tion of persistent income. Might also include a value function and
marginal marginal value function, depending on options selected.
'''
aLvl, pLvl = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
if self.vFuncBool:
self.makeEndOfPrdvFunc(EndOfPrdvP)
if self.CubicBool:
interpolator = self.makeCubiccFunc
else:
interpolator = self.makeLinearcFunc
solution = self.makeBasicSolution(EndOfPrdvP, aLvl, pLvl, interpolator)
solution = self.addMPCandHumanWealth(solution)
if self.vFuncBool:
solution.vFunc = self.makevFunc(solution)
if self.CubicBool:
solution = self.addvPPfunc(solution)
return solution
def solveConsGenIncProcess(solution_next, IncomeDstn, LivPrb, DiscFac, CRRA, Rfree, pLvlNextFunc,
BoroCnstArt, aXtraGrid, pLvlGrid, vFuncBool, CubicBool):
'''
Solves the one period problem of a consumer who experiences persistent and
transitory shocks to his income. Unlike in ConsIndShock, consumers do not
necessarily have expected persistent income growth that is constant with respect
to their current level of pLvl. Instead, they have a function that translates
current pLvl into expected next period pLvl (subject to shocks).
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, persistent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac
<filename>Website/FlaskWebsite/env/Lib/site-packages/matplotlib/tests/test_colorbar.py
import numpy as np
import pytest
from matplotlib import cm
import matplotlib.colors as mcolors
from matplotlib import rc_context
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.colors import (
BoundaryNorm, LogNorm, PowerNorm, Normalize, NoNorm
)
from matplotlib.colorbar import Colorbar
from matplotlib.ticker import FixedLocator
from matplotlib.testing.decorators import check_figures_equal
def _get_cmap_norms():
"""
Define a colormap and appropriate norms for each of the four
possible settings of the extend keyword.
Helper function for _colorbar_extension_shape and
_colorbar_extension_length.
"""
# Create a colormap and specify the levels it represents.
cmap = cm.get_cmap("RdBu", lut=5)
clevs = [-5., -2.5, -.5, .5, 1.5, 3.5]
# Define norms for the colormaps.
norms = dict()
norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1)
norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1)
norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1)
norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1)
return cmap, norms
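# Added commentary (not in the original test module): each BoundaryNorm maps
# the five intervals delimited by `clevs` onto the 5-entry "RdBu" LUT; the
# 'min', 'max' and 'both' variants widen the outer interval(s) to +/-10 so that
# out-of-range data lands in the extension bins drawn via `extend=`. A rough
# sketch of the resulting bin indices:
#
#     cmap, norms = _get_cmap_norms()
#     norms['both'](np.array([-7.0, 0.0, 7.0]))   # approximately bins 0, 2, 4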
def _colorbar_extension_shape(spacing):
"""
Produce 4 colorbars with rectangular extensions for either uniform
or proportional spacing.
Helper function for test_colorbar_extension_shape.
"""
# Get a colormap and appropriate norms for each extension type.
cmap, norms = _get_cmap_norms()
# Create a figure and adjust whitespace for subplots.
fig = plt.figure()
fig.subplots_adjust(hspace=4)
for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):
# Get the appropriate norm and use it to get colorbar boundaries.
norm = norms[extension_type]
boundaries = values = norm.boundaries
# note that the last value was silently dropped pre 3.3:
values = values[:-1]
# Create a subplot.
cax = fig.add_subplot(4, 1, i + 1)
# Generate the colorbar.
Colorbar(cax, cmap=cmap, norm=norm,
boundaries=boundaries, values=values,
extend=extension_type, extendrect=True,
orientation='horizontal', spacing=spacing)
# Turn off text and ticks.
cax.tick_params(left=False, labelleft=False,
bottom=False, labelbottom=False)
# Return the figure to the caller.
return fig
def _colorbar_extension_length(spacing):
"""
Produce 12 colorbars with variable length extensions for either
uniform or proportional spacing.
Helper function for test_colorbar_extension_length.
"""
# Get a colormap and appropriate norms for each extension type.
cmap, norms = _get_cmap_norms()
# Create a figure and adjust whitespace for subplots.
fig = plt.figure()
fig.subplots_adjust(hspace=.6)
for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):
# Get the appropriate norm and use it to get colorbar boundaries.
norm = norms[extension_type]
boundaries = values = norm.boundaries
values = values[:-1]
for j, extendfrac in enumerate((None, 'auto', 0.1)):
# Create a subplot.
cax = fig.add_subplot(12, 1, i*3 + j + 1)
# Generate the colorbar.
Colorbar(cax, cmap=cmap, norm=norm,
boundaries=boundaries, values=values,
extend=extension_type, extendfrac=extendfrac,
orientation='horizontal', spacing=spacing)
# Turn off text and ticks.
cax.tick_params(left=False, labelleft=False,
bottom=False, labelbottom=False)
# Return the figure to the caller.
return fig
@image_comparison(['colorbar_extensions_shape_uniform.png',
'colorbar_extensions_shape_proportional.png'])
def test_colorbar_extension_shape():
"""Test rectangular colorbar extensions."""
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
# Create figures for uniform and proportionally spaced colorbars.
_colorbar_extension_shape('uniform')
_colorbar_extension_shape('proportional')
@image_comparison(['colorbar_extensions_uniform.png',
'colorbar_extensions_proportional.png'],
tol=1.0)
def test_colorbar_extension_length():
"""Test variable length colorbar extensions."""
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
# Create figures for uniform and proportionally spaced colorbars.
_colorbar_extension_length('uniform')
_colorbar_extension_length('proportional')
@pytest.mark.parametrize('use_gridspec', [True, False])
@image_comparison(['cbar_with_orientation',
'cbar_locationing',
'double_cbar',
'cbar_sharing',
],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_colorbar_positioning(use_gridspec):
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
# -------------------
plt.figure()
plt.contourf(data, levels=levels)
plt.colorbar(orientation='horizontal', use_gridspec=use_gridspec)
locations = ['left', 'right', 'top', 'bottom']
plt.figure()
for i, location in enumerate(locations):
plt.subplot(2, 2, i + 1)
plt.contourf(data, levels=levels)
plt.colorbar(location=location, use_gridspec=use_gridspec)
# -------------------
plt.figure()
# make some other data (random integers)
data_2nd = np.array([[2, 3, 2, 3], [1.5, 2, 2, 3], [2, 3, 3, 4]])
# make the random data expand to the shape of the main data
data_2nd = np.repeat(np.repeat(data_2nd, 10, axis=1), 10, axis=0)
color_mappable = plt.contourf(data, levels=levels, extend='both')
# test extend frac here
hatch_mappable = plt.contourf(data_2nd, levels=[1, 2, 3], colors='none',
hatches=['/', 'o', '+'], extend='max')
plt.contour(hatch_mappable, colors='black')
plt.colorbar(color_mappable, location='left', label='variable 1',
use_gridspec=use_gridspec)
plt.colorbar(hatch_mappable, location='right', label='variable 2',
use_gridspec=use_gridspec)
# -------------------
plt.figure()
ax1 = plt.subplot(211, anchor='NE', aspect='equal')
plt.contourf(data, levels=levels)
ax2 = plt.subplot(223)
plt.contourf(data, levels=levels)
ax3 = plt.subplot(224)
plt.contourf(data, levels=levels)
plt.colorbar(ax=[ax2, ax3, ax1], location='right', pad=0.0, shrink=0.5,
panchor=False, use_gridspec=use_gridspec)
plt.colorbar(ax=[ax2, ax3, ax1], location='left', shrink=0.5,
panchor=False, use_gridspec=use_gridspec)
plt.colorbar(ax=[ax1], location='bottom', panchor=False,
anchor=(0.8, 0.5), shrink=0.6, use_gridspec=use_gridspec)
@image_comparison(['contour_colorbar.png'], remove_text=True)
def test_contour_colorbar():
fig, ax = plt.subplots(figsize=(4, 2))
data = np.arange(1200).reshape(30, 40) - 500
levels = np.array([0, 200, 400, 600, 800, 1000, 1200]) - 500
CS = ax.contour(data, levels=levels, extend='both')
fig.colorbar(CS, orientation='horizontal', extend='both')
fig.colorbar(CS, orientation='vertical')
@image_comparison(['cbar_with_subplots_adjust.png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_gridspec_make_colorbar():
plt.figure()
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
plt.subplot(121)
plt.contourf(data, levels=levels)
plt.colorbar(use_gridspec=True, orientation='vertical')
plt.subplot(122)
plt.contourf(data, levels=levels)
plt.colorbar(use_gridspec=True, orientation='horizontal')
plt.subplots_adjust(top=0.95, right=0.95, bottom=0.2, hspace=0.25)
@image_comparison(['colorbar_single_scatter.png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_colorbar_single_scatter():
# Issue #2642: if a path collection has only one entry,
# the norm scaling within the colorbar must ensure a
# finite range, otherwise a zero denominator will occur in _locate.
plt.figure()
x = y = [0]
z = [50]
cmap = plt.get_cmap('jet', 16)
cs = plt.scatter(x, y, z, c=z, cmap=cmap)
plt.colorbar(cs)
@pytest.mark.parametrize('use_gridspec', [False, True],
ids=['no gridspec', 'with gridspec'])
def test_remove_from_figure(use_gridspec):
"""
Test `remove` with the specified ``use_gridspec`` setting
"""
fig, ax = plt.subplots()
sc = ax.scatter([1, 2], [3, 4], cmap="spring")
sc.set_array(np.array([5, 6]))
pre_position = ax.get_position()
cb = fig.colorbar(sc, use_gridspec=use_gridspec)
fig.subplots_adjust()
cb.remove()
fig.subplots_adjust()
post_position = ax.get_position()
assert (pre_position.get_points() == post_position.get_points()).all()
def test_remove_from_figure_cl():
"""
Test `remove` with constrained_layout
"""
fig, ax = plt.subplots(constrained_layout=True)
sc = ax.scatter([1, 2], [3, 4], cmap="spring")
sc.set_array(np.array([5, 6]))
fig.draw_without_rendering()
pre_position = ax.get_position()
cb = fig.colorbar(sc)
cb.remove()
fig.draw_without_rendering()
post_position = ax.get_position()
np.testing.assert_allclose(pre_position.get_points(),
post_position.get_points())
def test_colorbarbase():
# smoke test from #3805
ax = plt.gca()
Colorbar(ax, cmap=plt.cm.bone)
@image_comparison(['colorbar_closed_patch.png'], remove_text=True)
def test_colorbar_closed_patch():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_axes([0.05, 0.85, 0.9, 0.1])
ax2 = fig.add_axes([0.1, 0.65, 0.75, 0.1])
ax3 = fig.add_axes([0.05, 0.45, 0.9, 0.1])
ax4 = fig.add_axes([0.05, 0.25, 0.9, 0.1])
ax5 = fig.add_axes([0.05, 0.05, 0.9, 0.1])
cmap = cm.get_cmap("RdBu", lut=5)
im = ax1.pcolormesh(np.linspace(0, 10, 16).reshape((4, 4)), cmap=cmap)
# The use of a "values" kwarg here is unusual. It works only
# because it is matched to the data range in the image and to
# the number of colors in the LUT.
values = np.linspace(0, 10, 5)
cbar_kw = dict(orientation='horizontal', values=values, ticks=[])
# The wide line is to show that the closed path is being handled
# correctly. See PR #4186.
with rc_context({'axes.linewidth': 16}):
plt.colorbar(im, cax=ax2, extend='both', extendfrac=0.5, **cbar_kw)
plt.colorbar(im, cax=ax3, extend='both', **cbar_kw)
plt.colorbar(im, cax=ax4, extend='both', extendrect=True, **cbar_kw)
plt.colorbar(im, cax=ax5, extend='neither', **cbar_kw)
def test_colorbar_ticks():
# test fix for #5673
fig, ax = plt.subplots()
x = np.arange(-3.0, 4.001)
y = np.arange(-4.0, 3.001)
X, Y = np.meshgrid(x, y)
Z = X * Y
clevs = np.array([-12, -5, 0, 5, 12], dtype=float)
colors = ['r', 'g', 'b', 'c']
cs = ax.contourf(X, Y, Z, clevs, colors=colors, extend='neither')
cbar = fig.colorbar(cs, ax=ax, orientation='horizontal', ticks=clevs)
assert len(cbar.ax.xaxis.get_ticklocs()) == len(clevs)
def test_colorbar_minorticks_on_off():
# test for github issue #11510 and PR #11584
np.random.seed(seed=12345)
data = np.random.randn(20, 20)
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots()
# purposefully setting vmin and vmax to odd fractions
# so as to check for the correct locations of the minor ticks
im = ax.pcolormesh(data, vmin=-2.3, vmax=3.3)
cbar = fig.colorbar(im, extend='both')
# testing after minorticks_on()
cbar.minorticks_on()
np.testing.assert_almost_equal(
cbar.ax.yaxis.get_minorticklocs(),
[-2.2, -1.8, -1.6, -1.4, -1.2, -0.8, -0.6, -0.4, -0.2,
0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6, 1.8, 2.2, 2.4, 2.6, 2.8, 3.2])
# testing after minorticks_off()
cbar.minorticks_off()
np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), [])
im.set_clim(vmin=-1.2, vmax=1.2)
cbar.minorticks_on()
np.testing.assert_almost_equal(
cbar.ax.yaxis.get_minorticklocs(),
[-1.1, -0.9, -0.8, -0.7, -0.6, -0.4, -0.3, -0.2, -0.1,
0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.3])
# tests for github issue #13257 and PR #13265
data = np.random.uniform(low=1, high=10, size=(20, 20))
fig, ax = plt.subplots()
im = ax.pcolormesh(data, norm=LogNorm())
cbar = fig.colorbar(im)
fig.canvas.draw()
default_minorticklocks = cbar.ax.yaxis.get_minorticklocs()
# test that minorticks turn off for LogNorm
cbar.minorticks_off()
np.testing.assert_equal(cbar.ax.yaxis.get_minorticklocs(), [])
# test that minorticks turn back on for LogNorm
cbar.minorticks_on()
np.testing.assert_equal(cbar.ax.yaxis.get_minorticklocs(),
default_minorticklocks)
# test issue #13339: minorticks for LogNorm should stay off
cbar.minorticks_off()
cbar.set_ticks([3, 5, 7, 9])
np.testing.assert_equal(cbar.ax.yaxis.get_minorticklocs(), [])
def test_cbar_minorticks_for_rc_xyminortickvisible():
"""
issue gh-16468.
Making sure that minor ticks on the colorbar are turned on
(internally) using the cbar.minorticks_on() method when
rcParams['xtick.minor.visible'] = True (for horizontal cbar)
rcParams['ytick.minor.visible'] = True (for vertical cbar).
Using cbar.minorticks_on() ensures that the minor
<filename>EvalData/admin.py<gh_stars>10-100
"""
Appraise evaluation framework
See LICENSE for usage details
"""
# pylint: disable=C0330
from datetime import datetime
from django.contrib import admin, messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.timezone import utc
from .models import Market, Metadata, TextSegment, TextPair, TextPairWithImage
from .models import TextPairWithContext, TextSegmentWithTwoTargets
from .models import DataAssessmentTask, DataAssessmentResult
from .models import DirectAssessmentTask, DirectAssessmentResult
from .models import DirectAssessmentContextTask, DirectAssessmentContextResult
from .models import DirectAssessmentDocumentTask, DirectAssessmentDocumentResult
from .models import MultiModalAssessmentTask, MultiModalAssessmentResult
from .models import PairwiseAssessmentTask, PairwiseAssessmentResult
from .models import WorkAgenda, TaskAgenda
# TODO:chrife: find a way to use SELECT-based filtering widgets
class BaseMetadataAdmin(admin.ModelAdmin):
"""
Model admin for abstract base metadata object model.
"""
list_display = [
'modifiedBy', 'dateModified'
]
list_filter = [
'activated', 'completed', 'retired'
]
search_fields = [
'createdBy__username', 'activatedBy__username', 'completedBy__username',
'retiredBy__username', 'modifiedBy__username', '_str_name'
]
# pylint: disable=C0111,R0903
class Meta:
abstract = True
fieldsets = (
('Advanced options', {
'classes': ('collapse',),
'fields': ('activated', 'completed', 'retired')
}),
)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
utc_now = datetime.utcnow().replace(tzinfo=utc)
if not hasattr(obj, 'createdBy') or obj.createdBy is None:
obj.createdBy = request.user
obj.dateCreated = utc_now
obj.save()
if obj.activated:
if not hasattr(obj, 'activatedBy') or obj.activatedBy is None:
obj.activatedBy = request.user
obj.dateActivated = utc_now
obj.save()
if obj.completed:
if not hasattr(obj, 'completedBy') or obj.completedBy is None:
obj.completedBy = request.user
obj.dateCompleted = utc_now
obj.save()
if obj.retired:
if not hasattr(obj, 'retiredBy') or obj.retiredBy is None:
obj.retiredBy = request.user
obj.dateRetired = utc_now
obj.save()
obj.modifiedBy = request.user
obj.dateModified = utc_now
obj.save()
super(BaseMetadataAdmin, self).save_model(request, obj, form, change)
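# Added commentary (not in the original source): save_model() stamps the audit
# trail (createdBy/dateCreated, activatedBy/dateActivated, completedBy/
# dateCompleted, retiredBy/dateRetired, modifiedBy/dateModified) on every save
# made through the admin. How the admin classes below are registered is not
# shown in this excerpt; a conventional wiring, purely as an assumption, would
# look like:
#
#     admin.site.register(Market, MarketAdmin)
#     admin.site.register(Metadata, MetadataAdmin)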
class MarketAdmin(BaseMetadataAdmin):
"""
Model admin for Market instances.
"""
list_display = [
'__str__', 'sourceLanguageCode', 'targetLanguageCode', 'domainName'
] + BaseMetadataAdmin.list_display
list_filter = [
'sourceLanguageCode', 'targetLanguageCode', 'domainName'
] + BaseMetadataAdmin.list_filter
search_fields = [
'marketID'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['sourceLanguageCode', 'targetLanguageCode',
'domainName'])
}),
) + BaseMetadataAdmin.fieldsets
class MetadataAdmin(BaseMetadataAdmin):
"""
Model admin for Metadata instances.
"""
list_display = [
'market', 'corpusName', 'versionInfo', 'source'
] + BaseMetadataAdmin.list_display
list_filter = [
'market__marketID', 'corpusName', 'versionInfo'
] + BaseMetadataAdmin.list_filter
search_fields = [
'market__marketID', 'corpusName', 'versionInfo', 'source'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['market', 'corpusName', 'versionInfo', 'source'])
}),
) + BaseMetadataAdmin.fieldsets
class TextSegmentAdmin(BaseMetadataAdmin):
"""
Model admin for TextSegment instances.
"""
list_display = [
'metadata', 'itemID', 'itemType', 'segmentID', 'segmentText'
] + BaseMetadataAdmin.list_display
list_filter = [
'metadata__corpusName', 'metadata__versionInfo',
'metadata__market__sourceLanguageCode',
'metadata__market__targetLanguageCode',
'metadata__market__domainName',
'itemType'
] + BaseMetadataAdmin.list_filter
search_fields = [
'segmentID', 'segmentText'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['metadata', 'itemID', 'itemType', 'segmentID',
'segmentText'])
}),
) + BaseMetadataAdmin.fieldsets
class TextSegmentWithTwoTargetsAdmin(BaseMetadataAdmin):
"""
Model admin for TextSegmentWithTwoTargets instances.
"""
list_display = [
'__str__', 'itemID', 'itemType', 'segmentID', 'segmentText',
'target1ID', 'target1Text', 'target2ID', 'target2Text'
] + BaseMetadataAdmin.list_display
list_filter = [
'metadata__corpusName',
'metadata__versionInfo',
'metadata__market__sourceLanguageCode',
'metadata__market__targetLanguageCode',
'metadata__market__domainName',
'itemType'
] + BaseMetadataAdmin.list_filter
search_fields = [
'segmentID', 'segmentText',
'target1ID', 'target1Text',
'target2ID', 'target2Text'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['metadata', 'itemID', 'itemType',
'segmentID', 'segmentText',
'target1ID', 'target1Text',
'target2ID', 'target2Text'])
}),
) + BaseMetadataAdmin.fieldsets
class TextPairAdmin(BaseMetadataAdmin):
"""
Model admin for TextPair instances.
"""
list_display = [
'__str__', 'itemID', 'itemType', 'sourceID', 'sourceText', 'targetID',
'targetText'
] + BaseMetadataAdmin.list_display
list_filter = [
'metadata__corpusName', 'metadata__versionInfo',
'metadata__market__sourceLanguageCode',
'metadata__market__targetLanguageCode',
'metadata__market__domainName',
'itemType'
] + BaseMetadataAdmin.list_filter
search_fields = [
'sourceID', 'sourceText', 'targetID', 'targetText'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['metadata', 'itemID', 'itemType', 'sourceID',
'sourceText', 'targetID', 'targetText'])
}),
) + BaseMetadataAdmin.fieldsets
class TextPairWithContextAdmin(BaseMetadataAdmin):
"""
Model admin for TextPairWithContext instances.
"""
list_display = [
'__str__', 'itemID', 'itemType', 'documentID', 'isCompleteDocument',
'sourceID', 'sourceText', 'sourceContextLeft', 'sourceContextRight',
'targetID', 'targetText', 'targetContextLeft', 'targetContextRight'
] + BaseMetadataAdmin.list_display
list_filter = [
'metadata__corpusName', 'metadata__versionInfo',
'metadata__market__sourceLanguageCode',
'metadata__market__targetLanguageCode',
'metadata__market__domainName',
'itemType',
'isCompleteDocument'
] + BaseMetadataAdmin.list_filter
search_fields = [
'documentID', 'sourceID', 'targetID',
'sourceText', 'sourceContextLeft', 'sourceContextRight',
'targetText', 'targetContextLeft', 'targetContextRight'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['metadata', 'itemID', 'itemType', 'documentID',
'isCompleteDocument', 'sourceID', 'sourceText', 'sourceContextLeft',
'sourceContextRight', 'targetID', 'targetText', 'targetContextLeft',
'targetContextRight'])
}),
) + BaseMetadataAdmin.fieldsets
class TextPairWithImageAdmin(BaseMetadataAdmin):
"""
Model admin for TextPairWithImage instances.
"""
list_display = [
'__str__', 'itemID', 'itemType', 'sourceID', 'sourceText', 'targetID',
'targetText', 'imageURL'
] + BaseMetadataAdmin.list_display
list_filter = [
'metadata__corpusName', 'metadata__versionInfo',
'metadata__market__sourceLanguageCode',
'metadata__market__targetLanguageCode',
'metadata__market__domainName',
'itemType'
] + BaseMetadataAdmin.list_filter
search_fields = [
'sourceID', 'sourceText', 'targetID', 'targetText'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['metadata', 'itemID', 'itemType', 'sourceID',
'sourceText', 'targetID', 'targetText', 'imageURL'])
}),
) + BaseMetadataAdmin.fieldsets
class DirectAssessmentTaskAdmin(BaseMetadataAdmin):
"""
Model admin for DirectAssessmentTask instances.
"""
list_display = [
'dataName', 'batchNo', 'campaign', 'requiredAnnotations'
] + BaseMetadataAdmin.list_display
list_filter = [
'campaign__campaignName',
'campaign__batches__market__targetLanguageCode',
'campaign__batches__market__sourceLanguageCode', 'batchData'
] + BaseMetadataAdmin.list_filter
search_fields = [
'campaign__campaignName', 'assignedTo'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['batchData', 'batchNo', 'campaign', 'items',
'requiredAnnotations', 'assignedTo'])
}),
) + BaseMetadataAdmin.fieldsets
class DirectAssessmentResultAdmin(BaseMetadataAdmin):
"""
Model admin for DirectAssessmentResult instances.
"""
list_display = [
'__str__', 'score', 'start_time', 'end_time', 'duration', 'item_type'
] + BaseMetadataAdmin.list_display
list_filter = [
'item__itemType', 'task__completed'
] + BaseMetadataAdmin.list_filter
search_fields = [
# nothing model specific
] + BaseMetadataAdmin.search_fields
readonly_fields = ('item', 'task')
fieldsets = (
(None, {
'fields': (['score', 'start_time', 'end_time'])
}),
('Related', {
'fields': (['item', 'task'])
})
) + BaseMetadataAdmin.fieldsets
class DirectAssessmentContextTaskAdmin(BaseMetadataAdmin):
"""
Model admin for DirectAssessmentContextTask instances.
"""
list_display = [
'dataName', 'batchNo', 'campaign', 'requiredAnnotations'
] + BaseMetadataAdmin.list_display
list_filter = [
'campaign__campaignName',
'campaign__batches__market__targetLanguageCode',
'campaign__batches__market__sourceLanguageCode', 'batchData'
] + BaseMetadataAdmin.list_filter
search_fields = [
'campaign__campaignName', 'assignedTo'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['batchData', 'batchNo', 'campaign', 'items',
'requiredAnnotations', 'assignedTo'])
}),
) + BaseMetadataAdmin.fieldsets
class DirectAssessmentContextResultAdmin(BaseMetadataAdmin):
"""
Model admin for DirectAssessmentContextResult instances.
"""
list_display = [
'__str__', 'score', 'start_time', 'end_time', 'duration', 'item_type',
] + BaseMetadataAdmin.list_display
list_filter = [
'item__itemType', 'task__completed', 'item__isCompleteDocument'
] + BaseMetadataAdmin.list_filter
search_fields = [
# nothing model specific
] + BaseMetadataAdmin.search_fields
readonly_fields = ('item', 'task')
fieldsets = (
(None, {
'fields': (['score', 'start_time', 'end_time'])
}),
('Related', {
'fields': (['item', 'task'])
})
) + BaseMetadataAdmin.fieldsets
class DirectAssessmentDocumentTaskAdmin(DirectAssessmentContextTaskAdmin):
"""
Model admin for DirectAssessmentDocumentTask instances.
"""
pass
class DirectAssessmentDocumentResultAdmin(DirectAssessmentContextResultAdmin):
"""
Model admin for DirectAssessmentDocumentResult instances.
"""
pass
class MultiModalAssessmentTaskAdmin(BaseMetadataAdmin):
"""
Model admin for MultiModalAssessmentTask instances.
"""
list_display = [
'dataName', 'batchNo', 'campaign', 'requiredAnnotations'
] + BaseMetadataAdmin.list_display
list_filter = [
'campaign__campaignName',
'campaign__batches__market__targetLanguageCode',
'campaign__batches__market__sourceLanguageCode', 'batchData'
] + BaseMetadataAdmin.list_filter
search_fields = [
'campaign__campaignName', 'assignedTo'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['batchData', 'batchNo', 'campaign', 'items',
'requiredAnnotations', 'assignedTo'])
}),
) + BaseMetadataAdmin.fieldsets
class MultiModalAssessmentResultAdmin(BaseMetadataAdmin):
"""
Model admin for MultiModalAssessmentResult instances.
"""
list_display = [
'__str__', 'score', 'start_time', 'end_time', 'duration', 'item_type'
] + BaseMetadataAdmin.list_display
list_filter = [
'item__itemType', 'task__completed'
] + BaseMetadataAdmin.list_filter
search_fields = [
# nothing model specific
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['score', 'start_time', 'end_time', 'item', 'task'])
}),
) + BaseMetadataAdmin.fieldsets
class WorkAgendaAdmin(admin.ModelAdmin):
"""
Model admin for WorkAgenda object model.
"""
list_display = [
'user', 'campaign', 'completed'
]
list_filter = [
'campaign'
]
search_fields = [
'user__username', 'campaign__campaignName',
]
class TaskAgendaAdmin(admin.ModelAdmin):
"""
Model admin for TaskAgenda object model.
"""
actions = ['reset_taskagenda']
list_display = [
'user', 'campaign', 'completed'
]
list_filter = [
'campaign'
]
search_fields = [
'user__username', 'campaign__campaignName',
]
def get_actions(self, request):
"""
Reset task agenda action requires reset_taskagenda permission.
"""
actions = super(TaskAgendaAdmin, self).get_actions(request)
if 'reset_taskagenda' in actions:
if not request.user.has_perm('EvalData.reset_taskagenda'):
del actions['reset_taskagenda']
return actions
def reset_taskagenda(self, request, queryset):
"""
Handles reset task agenda admin action for TaskAgenda instances.
"""
agendas_selected = queryset.count()
if agendas_selected > 1:
_msg = (
"You can only reset one task agenda at a time. "
"No items have been changed."
)
self.message_user(request, _msg, level=messages.WARNING)
return HttpResponseRedirect(
reverse('admin:EvalData_taskagenda_changelist'))
_pk = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
return HttpResponseRedirect(
reverse('reset-taskagenda', args=_pk))
reset_taskagenda.short_description = "Reset task agenda"
class PairwiseAssessmentTaskAdmin(BaseMetadataAdmin):
"""
Model admin for PairwiseAssessmentTask instances.
"""
list_display = [
'dataName', 'batchNo', 'campaign', 'requiredAnnotations'
] + BaseMetadataAdmin.list_display
list_filter = [
'campaign__campaignName',
'campaign__batches__market__targetLanguageCode',
'campaign__batches__market__sourceLanguageCode', 'batchData'
] + BaseMetadataAdmin.list_filter
search_fields = [
'campaign__campaignName', 'assignedTo'
] + BaseMetadataAdmin.search_fields
fieldsets = (
(None, {
'fields': (['batchData', 'batchNo', 'campaign', 'items',
'requiredAnnotations', 'assignedTo'])
}),
) + BaseMetadataAdmin.fieldsets
class PairwiseAssessmentResultAdmin(BaseMetadataAdmin):
"""
Model admin for PairwiseAssessmentResult instances.
"""
list_display = [
'__str__', 'score1', 'score2', 'start_time', 'end_time', 'duration',
'item_type'
] + BaseMetadataAdmin.list_display
list_filter = [
'item__itemType', 'task__completed'
] + BaseMetadataAdmin.list_filter
<filename>environments/sokoban.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Adapted from https://github.com/mpSchrader/gym-sokoban
# <NAME>, 2018.
import os
from os import listdir
from os import path
from os.path import isfile, join
import random
import numpy as np
from utils.graph import Graph
from utils.graph import Entity
import zipfile
from utils.spec_reader import spec
SOKOBAN_MAX_STEPS = spec.val("SOKOBAN_MAX_STEPS")
SOKOBAN_DIFFICULTY = spec.val("SOKOBAN_DIFFICULTY")
SOKOBAN_SPLIT = spec.val("SOKOBAN_SPLIT")
SOKOBAN_ROOM_OVERRIDE = spec.val("SOKOBAN_ROOM_OVERRIDE")
SOKOBAN_BOXES_REQUIRED = spec.val("SOKOBAN_BOXES_REQUIRED")
SOKOBAN_OBSERVATION_FORMAT = spec.val("SOKOBAN_OBSERVATION_FORMAT")
SOKOBAN_REWARD_PER_STEP = spec.val("SOKOBAN_REWARD_PER_STEP")
SOKOBAN_REWARD_SUCCESS = spec.val("SOKOBAN_REWARD_SUCCESS")
PIXELS_PER_TILE = 6 # Each tile is one pixel in the original Sokoban images, 8 pixels per cell, and 10x10 cells in a puzzle.
TILES_PER_CELL = 8
PUZZLE_SCALE = PIXELS_PER_TILE * TILES_PER_CELL
PUZZLE_SIZE = 10
# Cell state codes
WALL = 0
FLOOR = 1
TARGET = 2
BOX_ON_TARGET = 3
BOX_ON_FLOOR = 4
AGENT_ON_FLOOR = 5
AGENT_ON_TARGET = 6
CHANGE_COORDINATES = {
0: (-1, 0), # 0: Move up
1: (1, 0), # 1: Move down
2: (0, -1), # 2: Move left
3: (0, 1) # 3: Move right
}
ACTION_NAMES = ['Ponder', 'Up', 'Down', 'Left', 'Right']
class Sokoban_Env(object):
def __init__(self, seed):
self.rand = random.Random(seed)
self.num_boxes = 4
self.boxes_on_target = 0
self.max_steps_per_episode = SOKOBAN_MAX_STEPS * SOKOBAN_BOXES_REQUIRED / self.num_boxes
# Penalties and Rewards
self.penalty_for_step = SOKOBAN_REWARD_PER_STEP
self.penalty_box_off_target = -1
self.reward_box_on_target = 1
self.reward_finished = SOKOBAN_REWARD_SUCCESS * SOKOBAN_BOXES_REQUIRED / self.num_boxes
self.reward_last = 0
# Other Settings
self.action_space = 5
if SOKOBAN_OBSERVATION_FORMAT == 'grid':
self.observation_space = 400
self.observation = np.zeros((10,10,4), dtype=np.uint8)
elif SOKOBAN_OBSERVATION_FORMAT == 'factored':
self.observation = Graph()
self.num_factor_positions = 15
self.factor_position_offset = (self.num_factor_positions - 1) // 2
self.cell_factor_size = 0
self.cell_factor_size += 2 * (self.num_factor_positions + 1) # Cell position.
self.cell_factor_size += 3 # Cell identity.
self.cell_factor_size += 4  # Wall indicator bits (left, up, right, down).
self.core_obs_size = self.action_space + 1
self.core_obs_size += 1 + 4 # On target, four walls.
self.observation.entity_type_sizes.append(self.core_obs_size)
self.observation.entity_type_sizes.append(self.cell_factor_size)
self.observation_space = self.observation
self.use_display = False
self.num_cols_or_rows = PUZZLE_SIZE
self.pix_per_cell = PUZZLE_SCALE
self.wid = self.num_cols_or_rows
self.x_orig = -self.wid * self.pix_per_cell / 2
self.y_orig = self.wid * self.pix_per_cell / 2
self.agent_col = None
self.agent_row = None
self.reset_online_test_sums()
self.score = 0.
self.reward = 0.
self.action = None
# Cell channel encodings.
self.encodings = np.array(((1,0,0,0),(0,0,0,0),(0,1,0,0),(0,1,1,0),(0,0,1,0),(0,0,0,1),(0,1,0,1)), dtype=np.uint8)
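# Added note: per-cell channel order is (wall, target, box, agent); FLOOR is all zeros.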
self.total_steps = 0
self.total_reward = 0.
self.total_episodes = 0
self.total_episodes_won = 0.
def reset(self, repeat=False, episode_id = None):
self.train_data_dir = os.path.join('data', 'boxoban-levels-master', SOKOBAN_DIFFICULTY, SOKOBAN_SPLIT)
self.select_room(repeat, episode_id)
self.num_env_steps = 0
self.reward_last = 0
self.boxes_on_target = 0
self.agent_col = None
self.agent_row = None
self.action = None
self.draw()
return self.assemble_current_observation(0, 0.)
def step(self, action):
self.action = action
self.num_env_steps += 1
self.new_box_position = None
self.old_box_position = None
moved_box = False
if action == 0:
moved_player = False
else:
moved_player, moved_box = self._push(action)
self._calc_reward()
self.done = self._check_if_done()
self.reward = self.reward_last
self.score += self.reward
self.update_display()
self.update_online_test_sums(self.reward, self.done)
ret = self.assemble_current_observation(action, self.reward), self.reward, self.done
self.draw_text()
return ret
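# Illustrative usage sketch (added commentary; not part of the original file).
# It assumes the boxoban level files referenced by train_data_dir are available
# and that the display-related methods are no-ops when use_display is False:
#
#     env = Sokoban_Env(seed=0)
#     obs = env.reset()
#     done = False
#     while not done:
#         action = random.randrange(env.action_space)   # 0..4, see ACTION_NAMES
#         obs, reward, done = env.step(action)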
def add_wall_bits(self, row, col, vec, id):
if self.cell_state(row, col - 1) == WALL:
vec[id + 0] = 1.
if self.cell_state(row - 1, col) == WALL:
vec[id + 1] = 1.
if self.cell_state(row, col + 1) == WALL:
vec[id + 2] = 1.
if self.cell_state(row + 1, col) == WALL:
vec[id + 3] = 1.
return id + 4
def assemble_current_observation(self, action, reward):
if SOKOBAN_OBSERVATION_FORMAT == 'grid':
for row in range(10):
for col in range(10):
state = self.cell_state(row, col)
self.observation[row][col][:] = self.encodings[state]
elif SOKOBAN_OBSERVATION_FORMAT == 'factored':
self.assemble_observation_graph(action, reward)
return self.observation
def assemble_observation_graph(self, action, reward):
self.observation.clear()
self.agent_row = self.player_position[0]
self.agent_col = self.player_position[1]
# Handle the core.
core_entity = Entity(0, self.agent_row, self.agent_col)
core_entity.data = np.zeros(self.core_obs_size, np.float32)
i = 0
core_entity.data[i + action] = 1.
i += self.action_space
core_entity.data[i] = reward
i += 1
if self.cell_state(self.agent_row, self.agent_col) == AGENT_ON_TARGET:
core_entity.data[i] = 1.
i += 1
i = self.add_wall_bits(self.agent_row, self.agent_col, core_entity.data, i)
assert i == self.core_obs_size
self.observation.entities.append(core_entity)
# Handle the factors/percepts.
for row in range(self.num_cols_or_rows):
for col in range(self.num_cols_or_rows):
state = self.cell_state(row, col)
if (state == WALL) or (state == AGENT_ON_TARGET) or (state == AGENT_ON_FLOOR):
continue
factor = Entity(1, row, col)
factor.data = np.zeros(self.cell_factor_size, np.float32)
i = 0
# Encode the cell's row and col positions with respect to the agent.
i = self.encode_position(col - self.agent_col, factor.data, i)
i = self.encode_position(row - self.agent_row, factor.data, i)
# Cell identity.
bits = self.encodings[state]
factor.data[i] = bits[1]
i += 1
factor.data[i] = bits[2]
i += 1
factor.data[i] = bits[3]
i += 1
i = self.add_wall_bits(row, col, factor.data, i)
assert i == self.cell_factor_size
self.observation.entities.append(factor)
def encode_position(self, pos, buf, i):
buf[i] = pos / self.factor_position_offset
i += 1
buf[i + self.factor_position_offset + pos] = 1.
i += self.num_factor_positions
return i
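# Worked example for encode_position (editor's note; values assume the
# defaults num_factor_positions = 15, factor_position_offset = 7): a cell two
# columns to the right of the agent has pos = 2, so the call writes
#     buf[i]             = 2 / 7   # signed scalar offset
#     buf[i + 1 + 7 + 2] = 1.      # one-hot slot for +2
# and advances i by 1 + 15 = 16, matching the 2 * (num_factor_positions + 1)
# term used for cell_factor_size in __init__.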
def x_pix_from_col(self, col):
return self.x_orig + col * self.pix_per_cell
def y_pix_from_row(self, row):
return self.y_orig - row * self.pix_per_cell
def draw_line(self, x1, y1, x2, y2, color):
self.t.color(color)
self.t.setpos(x1, y1)
self.t.pendown()
self.t.goto(x2, y2)
self.t.penup()
def draw_rect(self, x1, y1, x2, y2, color):
self.t.pensize(1)
self.t.begin_fill()
self.t.color(color)
self.t.setpos(x1, y1)
self.t.pendown()
self.t.goto(x1, y2)
self.t.goto(x2, y2)
self.t.goto(x2, y1)
self.t.goto(x1, y1)
self.t.penup()
self.t.end_fill()
def cell_state(self, row, col):
state = self.room_state[row][col]
if (state == AGENT_ON_FLOOR) and (self.room_fixed[row][col] == TARGET):
state = AGENT_ON_TARGET # Patch up this one missing case.
return state
def render_cell(self, row, col):
state = self.cell_state(row, col)
draw_ball = False
draw_x = False
rad = self.pix_per_cell / 2
wall_color = 'gray'
floor_color = 'black'
target_color = '#a00000'
if state == WALL:
background_color = wall_color
elif state == FLOOR:
background_color = floor_color
elif state == TARGET:
background_color = target_color
elif state == BOX_ON_TARGET:
background_color = target_color
draw_x = True
elif state == BOX_ON_FLOOR:
background_color = floor_color
draw_x = True
elif state == AGENT_ON_FLOOR:
background_color = floor_color
draw_ball = True
elif state == AGENT_ON_TARGET:
background_color = target_color
draw_ball = True
x = self.x_pix_from_col(col)
y = self.y_pix_from_row(row)
self.t.color(background_color)
self.draw_rect(x - rad, y - rad, x + rad - 1, y + rad - 1, background_color)
if draw_ball:
self.t.setpos(x, y)
self.t.pensize(rad)
self.t.dot(self.pix_per_cell, '#00ff00')
if draw_x:
line_wid = 3
margin = line_wid - 1
self.t.pensize(line_wid)
xl = x-rad+margin
xr = x+rad-margin
yt = y+rad-margin
yb = y-rad+margin
box_color = 'yellow'
self.draw_line(xl, yb, xr, yt, box_color)
self.draw_line(xl, yt, xr, yb, box_color)
self.draw_line(xl, yb, xl, yt, box_color)
self.draw_line(xl, yt, xr, yt, box_color)
self.draw_line(xr, yt, xr, yb, box_color)
self.draw_line(xr, yb, xl, yb, box_color)
def draw_text(self):
if not self.use_display:
return
rad = self.num_cols_or_rows * self.pix_per_cell / 2
# Draw text below.
self.draw_rect(-rad, -rad - 27, -rad + 800, -rad + 12, 'light gray')
self.t.color('black')
self.t.setpos(-rad, -rad - 26)
self.t.write('Last reward: {:4.1f} Total reward:{:5.1f}'.format(self.reward, self.score), font=("FixedSys", 16, "normal"))
self.t.setpos(-rad, -rad - 6)
if self.action is None:
action_name = 'None'
else:
action_name = ACTION_NAMES[self.action]
self.t.write('Last action: {:10s} Steps taken: {}'.format(action_name, self.num_env_steps), font=("FixedSys", 16, "normal"))
# self.draw_rect(-rad, rad + 71, -rad + 800, rad + 110, 'light gray')
# self.t.setpos(-rad, rad + 70)
# self.t.color('black')
# self.t.write('Observation: {}'.format(self.observation), font=("Arial", 16, "normal"))
def draw(self):
if self.use_display:
self.t.shape('square')
self.agent_row = self.player_position[0]
self.agent_col = self.player_position[1]
for row in range(self.num_cols_or_rows):
for col in range(self.num_cols_or_rows):
self.render_cell(row, col)
self.t._update()
self.draw_text()
def update_display(self):
if self.use_display:
# Did the agent just move?
old_agent_row = self.agent_row
old_agent_col = self.agent_col
new_agent_row = self.player_position[0]
new_agent_col = self.player_position[1]
if (new_agent_row != old_agent_row) or (new_agent_col != old_agent_col):
# Yes. Render the two cells involved.
self.render_cell(old_agent_row, old_agent_col)
self.render_cell(new_agent_row, new_agent_col)
# Also render the next cell if it contains a box, in case it was just pushed there.
next_row = new_agent_row + (new_agent_row - old_agent_row)
next_col = new_agent_col + (new_agent_col - old_agent_col)
if (next_row >= 0) and (next_row < self.num_cols_or_rows) and \
(next_col >= 0) and (next_col < self.num_cols_or_rows):
if (self.cell_state(next_row, next_col) == BOX_ON_FLOOR) or \
(self.cell_state(next_row, next_col) == BOX_ON_TARGET):
self.render_cell(next_row, next_col)
# Update the agent location.
self.agent_row = new_agent_row
self.agent_col = new_agent_col
def translate_key_to_action(self, key):
key = key.lower()
action = -1
if key == 'up':
action = 1
elif key == 'left':
action = 3
elif key == 'down':
action = 2
elif key == 'right':
action = 4
elif key == 'space':
action = None
elif key == 'delete':
self.reset()
elif key == 'r':
self.reset(True)
elif key == 'n':
self.reset()
else:
print(("Key not found"))
#print("action = {}".format(action))
return action
def select_room(self, repeat=False, episode_id = None):
if not repeat:
generated_files = [f for f in listdir(self.train_data_dir) if isfile(join(self.train_data_dir, f))]
if self.total_steps == 0:
print("{} puzzle files found.".format(len(generated_files)))
generated_files.sort()
if SOKOBAN_ROOM_OVERRIDE is None:
if episode_id | |
<= 0)
m.c1429 = Constraint(expr= m.x1428 - m.b3010 <= 0)
m.c1430 = Constraint(expr= m.x1429 - m.b3010 <= 0)
m.c1431 = Constraint(expr= m.x1430 - m.b3010 <= 0)
m.c1432 = Constraint(expr= m.x1431 - m.b3010 <= 0)
m.c1433 = Constraint(expr= m.x1432 - m.b3010 <= 0)
m.c1434 = Constraint(expr= m.x1433 - m.b3010 <= 0)
m.c1435 = Constraint(expr= m.x1434 - m.b3010 <= 0)
m.c1436 = Constraint(expr= m.x1435 - m.b3010 <= 0)
m.c1437 = Constraint(expr= m.x1436 - m.b3010 <= 0)
m.c1438 = Constraint(expr= m.x1437 - m.b3010 <= 0)
m.c1439 = Constraint(expr= m.x1438 - m.b3010 <= 0)
m.c1440 = Constraint(expr= m.x1439 - m.b3010 <= 0)
m.c1441 = Constraint(expr= m.x1440 - m.b3010 <= 0)
m.c1442 = Constraint(expr= m.x1441 - m.b3010 <= 0)
m.c1443 = Constraint(expr= m.x1442 - m.b3010 <= 0)
m.c1444 = Constraint(expr= m.x1443 - m.b3010 <= 0)
m.c1445 = Constraint(expr= m.x1444 - m.b3010 <= 0)
m.c1446 = Constraint(expr= m.x1445 - m.b3010 <= 0)
m.c1447 = Constraint(expr= m.x1446 - m.b3010 <= 0)
m.c1448 = Constraint(expr= m.x1447 - m.b3010 <= 0)
m.c1449 = Constraint(expr= m.x1448 - m.b3010 <= 0)
m.c1450 = Constraint(expr= m.x1449 - m.b3010 <= 0)
m.c1451 = Constraint(expr= m.x1450 - m.b3010 <= 0)
m.c1452 = Constraint(expr= m.x1451 - m.b3010 <= 0)
m.c1453 = Constraint(expr= m.x1452 - m.b3010 <= 0)
m.c1454 = Constraint(expr= m.x1453 - m.b3010 <= 0)
m.c1455 = Constraint(expr= m.x1454 - m.b3010 <= 0)
m.c1456 = Constraint(expr= m.x1455 - m.b3010 <= 0)
m.c1457 = Constraint(expr= m.x1456 - m.b3010 <= 0)
m.c1458 = Constraint(expr= m.x1457 - m.b3010 <= 0)
m.c1459 = Constraint(expr= m.x1458 - m.b3010 <= 0)
m.c1460 = Constraint(expr= m.x1459 - m.b3010 <= 0)
m.c1461 = Constraint(expr= m.x1460 - m.b3010 <= 0)
m.c1462 = Constraint(expr= m.x1461 - m.b3010 <= 0)
m.c1463 = Constraint(expr= m.x1462 - m.b3010 <= 0)
m.c1464 = Constraint(expr= m.x1463 - m.b3010 <= 0)
m.c1465 = Constraint(expr= m.x1464 - m.b3010 <= 0)
m.c1466 = Constraint(expr= m.x1465 - m.b3010 <= 0)
m.c1467 = Constraint(expr= m.x1466 - m.b3010 <= 0)
m.c1468 = Constraint(expr= m.x1467 - m.b3010 <= 0)
m.c1469 = Constraint(expr= m.x1468 - m.b3010 <= 0)
m.c1470 = Constraint(expr= m.x1469 - m.b3010 <= 0)
m.c1471 = Constraint(expr= m.x1470 - m.b3010 <= 0)
m.c1472 = Constraint(expr= m.x1471 - m.b3010 <= 0)
m.c1473 = Constraint(expr= m.x1472 - m.b3010 <= 0)
m.c1474 = Constraint(expr= m.x1473 - m.b3010 <= 0)
m.c1475 = Constraint(expr= m.x1474 - m.b3010 <= 0)
m.c1476 = Constraint(expr= m.x1475 - m.b3010 <= 0)
m.c1477 = Constraint(expr= m.x1476 - m.b3010 <= 0)
m.c1478 = Constraint(expr= m.x1477 - m.b3010 <= 0)
m.c1479 = Constraint(expr= m.x1478 - m.b3010 <= 0)
m.c1480 = Constraint(expr= m.x1479 - m.b3010 <= 0)
m.c1481 = Constraint(expr= m.x1480 - m.b3010 <= 0)
m.c1482 = Constraint(expr= m.x1481 - m.b3010 <= 0)
m.c1483 = Constraint(expr= m.x1482 - m.b3010 <= 0)
m.c1484 = Constraint(expr= m.x1483 - m.b3010 <= 0)
m.c1485 = Constraint(expr= m.x1484 - m.b3010 <= 0)
m.c1486 = Constraint(expr= m.x1485 - m.b3010 <= 0)
m.c1487 = Constraint(expr= m.x1486 - m.b3010 <= 0)
m.c1488 = Constraint(expr= m.x1487 - m.b3010 <= 0)
m.c1489 = Constraint(expr= m.x1488 - m.b3010 <= 0)
m.c1490 = Constraint(expr= m.x1489 - m.b3010 <= 0)
m.c1491 = Constraint(expr= m.x1490 - m.b3010 <= 0)
m.c1492 = Constraint(expr= m.x1491 - m.b3010 <= 0)
m.c1493 = Constraint(expr= m.x1492 - m.b3010 <= 0)
m.c1494 = Constraint(expr= m.x1493 - m.b3010 <= 0)
m.c1495 = Constraint(expr= m.x1494 - m.b3010 <= 0)
m.c1496 = Constraint(expr= m.x1495 - m.b3010 <= 0)
m.c1497 = Constraint(expr= m.x1496 - m.b3010 <= 0)
m.c1498 = Constraint(expr= m.x1497 - m.b3010 <= 0)
m.c1499 = Constraint(expr= m.x1498 - m.b3010 <= 0)
m.c1500 = Constraint(expr= m.x1499 - m.b3010 <= 0)
m.c1501 = Constraint(expr= m.x1500 - m.b3010 <= 0)
m.c1502 = Constraint(expr= m.x1501 - m.b3011 <= 0)
m.c1503 = Constraint(expr= m.x1502 - m.b3011 <= 0)
m.c1504 = Constraint(expr= m.x1503 - m.b3011 <= 0)
m.c1505 = Constraint(expr= m.x1504 - m.b3011 <= 0)
m.c1506 = Constraint(expr= m.x1505 - m.b3011 <= 0)
m.c1507 = Constraint(expr= m.x1506 - m.b3011 <= 0)
m.c1508 = Constraint(expr= m.x1507 - m.b3011 <= 0)
m.c1509 = Constraint(expr= m.x1508 - m.b3011 <= 0)
m.c1510 = Constraint(expr= m.x1509 - m.b3011 <= 0)
m.c1511 = Constraint(expr= m.x1510 - m.b3011 <= 0)
m.c1512 = Constraint(expr= m.x1511 - m.b3011 <= 0)
m.c1513 = Constraint(expr= m.x1512 - m.b3011 <= 0)
m.c1514 = Constraint(expr= m.x1513 - m.b3011 <= 0)
m.c1515 = Constraint(expr= m.x1514 - m.b3011 <= 0)
m.c1516 = Constraint(expr= m.x1515 - m.b3011 <= 0)
m.c1517 = Constraint(expr= m.x1516 - m.b3011 <= 0)
m.c1518 = Constraint(expr= m.x1517 - m.b3011 <= 0)
m.c1519 = Constraint(expr= m.x1518 - m.b3011 <= 0)
m.c1520 = Constraint(expr= m.x1519 - m.b3011 <= 0)
m.c1521 = Constraint(expr= m.x1520 - m.b3011 <= 0)
m.c1522 = Constraint(expr= m.x1521 - m.b3011 <= 0)
m.c1523 = Constraint(expr= m.x1522 - m.b3011 <= 0)
m.c1524 = Constraint(expr= m.x1523 - m.b3011 <= 0)
m.c1525 = Constraint(expr= m.x1524 - m.b3011 <= 0)
m.c1526 = Constraint(expr= m.x1525 - m.b3011 <= 0)
m.c1527 = Constraint(expr= m.x1526 - m.b3011 <= 0)
m.c1528 = Constraint(expr= m.x1527 - m.b3011 <= 0)
m.c1529 = Constraint(expr= m.x1528 - m.b3011 <= 0)
m.c1530 = Constraint(expr= m.x1529 - m.b3011 <= 0)
m.c1531 = Constraint(expr= m.x1530 - m.b3011 <= 0)
m.c1532 = Constraint(expr= m.x1531 - m.b3011 <= 0)
m.c1533 = Constraint(expr= m.x1532 - m.b3011 <= 0)
m.c1534 = Constraint(expr= m.x1533 - m.b3011 <= 0)
m.c1535 = Constraint(expr= m.x1534 - m.b3011 <= 0)
m.c1536 = Constraint(expr= m.x1535 - m.b3011 <= 0)
m.c1537 = Constraint(expr= m.x1536 - m.b3011 <= 0)
m.c1538 = Constraint(expr= m.x1537 - m.b3011 <= 0)
m.c1539 = Constraint(expr= m.x1538 - m.b3011 <= 0)
m.c1540 = Constraint(expr= m.x1539 - m.b3011 <= 0)
m.c1541 = Constraint(expr= m.x1540 - m.b3011 <= 0)
m.c1542 = Constraint(expr= m.x1541 - m.b3011 <= 0)
m.c1543 = Constraint(expr= m.x1542 - m.b3011 <= 0)
m.c1544 = Constraint(expr= m.x1543 - m.b3011 <= 0)
m.c1545 = Constraint(expr= m.x1544 - m.b3011 <= 0)
m.c1546 = Constraint(expr= m.x1545 - m.b3011 <= 0)
m.c1547 = Constraint(expr= m.x1546 - m.b3011 <= 0)
m.c1548 = Constraint(expr= m.x1547 - m.b3011 <= 0)
m.c1549 = Constraint(expr= m.x1548 - m.b3011 <= 0)
m.c1550 = Constraint(expr= m.x1549 - m.b3011 <= 0)
m.c1551 = Constraint(expr= m.x1550 - m.b3011 <= 0)
m.c1552 = Constraint(expr= m.x1551 - m.b3011 <= 0)
m.c1553 = Constraint(expr= m.x1552 - m.b3011 <= 0)
m.c1554 = Constraint(expr= m.x1553 - m.b3011 <= 0)
m.c1555 = Constraint(expr= m.x1554 - m.b3011 <= 0)
m.c1556 = Constraint(expr= m.x1555 - m.b3011 <= 0)
m.c1557 = Constraint(expr= m.x1556 - m.b3011 <= 0)
m.c1558 = Constraint(expr= m.x1557 - m.b3011 <= 0)
m.c1559 = Constraint(expr= m.x1558 - m.b3011 <= 0)
m.c1560 = Constraint(expr= m.x1559 - m.b3011 <= 0)
m.c1561 = Constraint(expr= m.x1560 - m.b3011 <= 0)
m.c1562 = Constraint(expr= m.x1561 - m.b3011 <= 0)
m.c1563 = Constraint(expr= m.x1562 - m.b3011 <= 0)
m.c1564 = Constraint(expr= m.x1563 - m.b3011 <= 0)
m.c1565 = Constraint(expr= m.x1564 - m.b3011 <= 0)
m.c1566 = Constraint(expr= m.x1565 - m.b3011 <= 0)
m.c1567 = Constraint(expr= m.x1566 - m.b3011 <= 0)
m.c1568 = Constraint(expr= m.x1567 - m.b3011 <= 0)
m.c1569 = Constraint(expr= m.x1568 - m.b3011 <= 0)
m.c1570 = Constraint(expr= m.x1569 - m.b3011 <= 0)
m.c1571 = Constraint(expr= m.x1570 - m.b3011 <= 0)
m.c1572 = Constraint(expr= m.x1571 - m.b3011 <= 0)
m.c1573 = Constraint(expr= m.x1572 - m.b3011 <= 0)
m.c1574 = Constraint(expr= m.x1573 - m.b3011 <= 0)
m.c1575 = Constraint(expr= m.x1574 - m.b3011 <= 0)
m.c1576 = Constraint(expr= m.x1575 - m.b3011 <= 0)
m.c1577 = Constraint(expr= m.x1576 - m.b3011 <= 0)
m.c1578 = Constraint(expr= m.x1577 - m.b3011 <= 0)
m.c1579 = Constraint(expr= m.x1578 - m.b3011 <= 0)
m.c1580 = Constraint(expr= m.x1579 - m.b3011 <= 0)
m.c1581 = Constraint(expr= m.x1580 - m.b3011 <= 0)
m.c1582 = Constraint(expr= m.x1581 - m.b3011 <= 0)
m.c1583 = Constraint(expr= m.x1582 - m.b3011 <= 0)
m.c1584 = Constraint(expr= m.x1583 - m.b3011 <= 0)
m.c1585 = Constraint(expr= m.x1584 - m.b3011 <= 0)
m.c1586 = Constraint(expr= m.x1585 - m.b3011 <= 0)
m.c1587 = Constraint(expr= m.x1586 - m.b3011 <= 0)
m.c1588 = Constraint(expr= m.x1587 - m.b3011 <= 0)
m.c1589 = Constraint(expr= m.x1588 - m.b3011 <= 0)
m.c1590 = Constraint(expr= m.x1589 - m.b3011 <= 0)
m.c1591 = Constraint(expr= m.x1590 - m.b3011 <= 0)
m.c1592 = Constraint(expr= m.x1591 - m.b3011 <= 0)
m.c1593 = Constraint(expr= m.x1592 - m.b3011 <= 0)
m.c1594 = Constraint(expr= m.x1593 - m.b3011 <= 0)
m.c1595 = Constraint(expr= m.x1594 - m.b3011 <= 0)
m.c1596 = Constraint(expr= m.x1595 - m.b3011 <= 0)
m.c1597 = Constraint(expr= m.x1596 - m.b3011 <= 0)
m.c1598 = Constraint(expr= m.x1597 - m.b3011 <= 0)
m.c1599 = Constraint(expr= m.x1598 - m.b3011 <= 0)
m.c1600 = Constraint(expr= m.x1599 - m.b3011 <= 0)
m.c1601 = Constraint(expr= m.x1600 - m.b3011 <= 0)
m.c1602 = Constraint(expr= m.x1601 - m.b3011 <= 0)
m.c1603 = Constraint(expr= m.x1602 - m.b3011 <= 0)
m.c1604 = Constraint(expr= m.x1603 - m.b3011 <= 0)
m.c1605 = Constraint(expr= m.x1604 - m.b3011 <= 0)
m.c1606 = Constraint(expr= m.x1605 - m.b3011 <= 0)
m.c1607 = Constraint(expr= m.x1606 - m.b3011 <= 0)
m.c1608 = Constraint(expr= m.x1607 - m.b3011 <= 0)
m.c1609 = Constraint(expr= m.x1608 - m.b3011 <= 0)
m.c1610 = Constraint(expr= m.x1609 - m.b3011 <= 0)
m.c1611 = Constraint(expr= m.x1610 - | |
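# Editor's note (illustrative sketch, not part of the generated model): every
# constraint above links one variable m.x{i} to a group binary via
# x_i - b_j <= 0 (b3010 for x1428-x1500, b3011 from x1501 onward). The same
# pattern could be written compactly with an indexed constraint; the helper
# below is hypothetical and assumes the usual `from pyomo.environ import *`.
#
#     def _linking_rule(m, i):
#         x_i = getattr(m, 'x%d' % i)
#         b_j = m.b3010 if i <= 1500 else m.b3011
#         return x_i - b_j <= 0
#     m.linking = Constraint(RangeSet(1428, 1610), rule=_linking_rule)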
(if any), else async
enabled_sections: Sections to load regardless of current settings
Returns:
Queryset with all requested eve objects
"""
ids = set(map(int, ids))
enabled_sections = self.model._enabled_sections_union(enabled_sections)
enabled_sections_filter = self._enabled_sections_filter(enabled_sections)
existing_ids = set(
self.filter(id__in=ids)
.filter(**enabled_sections_filter)
.values_list("id", flat=True)
)
for id in ids.difference(existing_ids):
self.update_or_create_esi(
id=int(id),
include_children=include_children,
wait_for_children=wait_for_children,
enabled_sections=enabled_sections,
)
return self.filter(id__in=ids)
class EvePlanetManager(EveUniverseEntityModelManager):
def _fetch_from_esi(self, id: int, enabled_sections: Iterable[str] = None) -> dict:
from .models import EveSolarSystem
esi_data = super()._fetch_from_esi(id=id)
# no need to proceed if all children have been disabled
if not self.model._children(enabled_sections):
return esi_data
if "system_id" not in esi_data:
raise ValueError("system_id not found in moon response - data error")
system_id = esi_data["system_id"]
solar_system_data = EveSolarSystem.objects._fetch_from_esi(id=system_id)
if "planets" not in solar_system_data:
raise ValueError("planets not found in solar system response - data error")
for planet in solar_system_data["planets"]:
if planet["planet_id"] == id:
if "moons" in planet:
esi_data["moons"] = planet["moons"]
if "asteroid_belts" in planet:
esi_data["asteroid_belts"] = planet["asteroid_belts"]
return esi_data
raise ValueError(
f"Failed to find moon {id} in solar system response for {system_id} "
f"- data error"
)
class EvePlanetChildrenManager(EveUniverseEntityModelManager):
def __init__(self) -> None:
super().__init__()
self._my_property_name = None
def _fetch_from_esi(self, id: int, enabled_sections: Iterable[str] = None) -> dict:
from .models import EveSolarSystem
if not self._my_property_name:
raise RuntimeWarning("my_property_name not initialzed")
esi_data = super()._fetch_from_esi(id=id)
if "system_id" not in esi_data:
raise ValueError("system_id not found in moon response - data error")
system_id = esi_data["system_id"]
solar_system_data = EveSolarSystem.objects._fetch_from_esi(id=system_id)
if "planets" not in solar_system_data:
raise ValueError("planets not found in solar system response - data error")
for planet in solar_system_data["planets"]:
if (
self._my_property_name in planet
and planet[self._my_property_name]
and id in planet[self._my_property_name]
):
esi_data["planet_id"] = planet["planet_id"]
return esi_data
raise ValueError(
f"Failed to find moon {id} in solar system response for {system_id} "
f"- data error"
)
class EveAsteroidBeltManager(EvePlanetChildrenManager):
def __init__(self) -> None:
super().__init__()
self._my_property_name = "asteroid_belts"
class EveMoonManager(EvePlanetChildrenManager):
def __init__(self) -> None:
super().__init__()
self._my_property_name = "moons"
class EveStargateManager(EveUniverseEntityModelManager):
"""For special handling of relations"""
def update_or_create_esi(
self,
*,
id: int,
include_children: bool = False,
wait_for_children: bool = True,
enabled_sections: Iterable[str] = None,
) -> Tuple[models.Model, bool]:
"""updates or creates an EveStargate object by fetching it from ESI (blocking).
Will always get/create parent objects
Args:
id: Eve Online ID of object
include_children: (no effect)
wait_for_children: (no effect)
Returns:
A tuple consisting of the requested object and a created flag
"""
obj, created = super().update_or_create_esi(
id=int(id),
include_children=include_children,
wait_for_children=wait_for_children,
)
if obj:
if obj.destination_eve_stargate is not None:
obj.destination_eve_stargate.destination_eve_stargate = obj
if obj.eve_solar_system is not None:
obj.destination_eve_stargate.destination_eve_solar_system = (
obj.eve_solar_system
)
obj.destination_eve_stargate.save()
return obj, created
class EveStationManager(EveUniverseEntityModelManager):
"""For special handling of station services"""
def _update_or_create_inline_objects(
self,
*,
parent_eve_data_obj: dict,
parent_obj: models.Model,
inline_objects: dict,
wait_for_children: bool,
enabled_sections: Iterable[str],
) -> None:
"""updates_or_creates station service objects for EveStations"""
from .models import EveStationService
if "services" in parent_eve_data_obj:
services = list()
for service_name in parent_eve_data_obj["services"]:
service, _ = EveStationService.objects.get_or_create(name=service_name)
services.append(service)
if services:
parent_obj.services.add(*services)
class EveTypeManager(EveUniverseEntityModelManager):
def update_or_create_esi(
self,
*,
id: int,
include_children: bool = False,
wait_for_children: bool = True,
enabled_sections: Iterable[str] = None,
) -> Tuple[models.Model, bool]:
obj, created = super().update_or_create_esi(
id=id,
include_children=include_children,
wait_for_children=wait_for_children,
enabled_sections=enabled_sections,
)
enabled_sections = self.model._enabled_sections_union(enabled_sections)
if enabled_sections and self.model.Section.TYPE_MATERIALS in enabled_sections:
from .models import EveTypeMaterial
EveTypeMaterial.objects.update_or_create_api(eve_type=obj)
return obj, created
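# Illustrative helper (editor's sketch, not part of the original module):
# shows how EveTypeManager above might be used to fetch a type together with
# its materials section; the type ID is hypothetical.
def _example_fetch_type_with_materials():
    from .models import EveType  # local import to avoid circular imports

    eve_type, created = EveType.objects.update_or_create_esi(
        id=603,  # hypothetical type ID
        enabled_sections=[EveType.Section.TYPE_MATERIALS],
    )
    return eve_type, created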
class EveEntityQuerySet(models.QuerySet):
"""Custom queryset for EveEntity"""
MAX_DEPTH = 5
def update_from_esi(self) -> int:
"""Updates all Eve entity objects in this queryset from ESI"""
ids = list(self.values_list("id", flat=True))
if not ids:
return 0
else:
logger.info("Updating %d entities from ESI", len(ids))
resolved_counter = 0
for chunk_ids in chunks(ids, 1000):
logger.debug(
"Trying to resolve the following IDs from ESI:\n%s", chunk_ids
)
resolved_counter += self._resolve_entities_from_esi(chunk_ids)
return resolved_counter
def _resolve_entities_from_esi(self, ids: list, depth: int = 1):
resolved_counter = 0
try:
items = esi.client.Universe.post_universe_names(ids=ids).results()
except HTTPNotFound:
# if the API fails to resolve all IDs, we divide and conquer,
# trying to resolve each half of the IDs separately
if len(ids) > 1 and depth < self.MAX_DEPTH:
resolved_counter += self._resolve_entities_from_esi(ids[::2], depth + 1)
resolved_counter += self._resolve_entities_from_esi(
ids[1::2], depth + 1
)
else:
logger.warning("Failed to resolve invalid IDs: %s", ids)
else:
resolved_counter += len(items)
for item in items:
try:
self.update_or_create(
id=item["id"],
defaults={"name": item["name"], "category": item["category"]},
)
except IntegrityError:
pass
return resolved_counter
class EveEntityManager(EveUniverseEntityModelManager):
"""Custom manager for EveEntity"""
def get_queryset(self) -> models.QuerySet:
return EveEntityQuerySet(self.model, using=self._db)
def get_or_create_esi(
self,
*,
id: int,
include_children: bool = False,
wait_for_children: bool = True,
) -> Tuple[models.Model, bool]:
"""gets or creates an EvEntity object.
The object is automatically fetched from ESI if it does not exist (blocking)
or if it has not yet been resolved.
Args:
id: Eve Online ID of object
Returns:
A tuple consisting of the requested EveEntity object and a created flag
Returns None as the object if the ID is invalid
"""
id = int(id)
try:
obj = self.exclude(name="").get(id=id)
created = False
except self.model.DoesNotExist:
obj, created = self.update_or_create_esi(
id=id,
include_children=include_children,
wait_for_children=wait_for_children,
)
return obj, created
def update_or_create_esi(
self,
*,
id: int,
include_children: bool = False,
wait_for_children: bool = True,
enabled_sections: Iterable[str] = None,
) -> Tuple[Optional[models.Model], bool]:
"""updates or creates an EveEntity object by fetching it from ESI (blocking).
Args:
id: Eve Online ID of object
include_children: (no effect)
wait_for_children: (no effect)
Returns:
A tuple consisting of the requested object and a created flag
When the ID is invalid the returned object will be None
Exceptions:
Raises all HTTP codes of ESI endpoint /universe/names except 404
"""
id = int(id)
logger.info("%s: Trying to resolve ID to EveEntity with ESI", id)
try:
result = esi.client.Universe.post_universe_names(ids=[id]).results()
except HTTPNotFound:
logger.info("%s: ID is not valid", id)
return None, False
item = result[0]
return self.update_or_create(
id=item.get("id"),
defaults={"name": item.get("name"), "category": item.get("category")},
)
def bulk_create_esi(self, ids: Iterable[int]) -> int:
"""bulk create and resolve multiple entities from ESI.
Will also resolve existing entities that have no name.
Args:
ids: List of valid EveEntity IDs
Returns:
Count of updated entities
"""
ids = set(map(int, ids))
with transaction.atomic():
existing_ids = set(self.filter(id__in=ids).values_list("id", flat=True))
new_ids = ids.difference(existing_ids)
if new_ids:
objects = [self.model(id=id) for id in new_ids]
self.bulk_create(
objects,
batch_size=EVEUNIVERSE_BULK_METHODS_BATCH_SIZE,
ignore_conflicts=True,
)
to_update_qs = self.filter(id__in=new_ids) | self.filter(
id__in=ids.difference(new_ids), name=""
)
return to_update_qs.update_from_esi()
return 0
def update_or_create_all_esi(
self,
*,
include_children: bool = False,
wait_for_children: bool = True,
) -> None:
"""not implemented - do not use"""
raise NotImplementedError()
def bulk_update_new_esi(self) -> int:
"""updates all unresolved EveEntity objects in the database from ESI.
Returns:
Count of updated entities.
"""
return self.filter(name="").update_from_esi()
def bulk_update_all_esi(self):
"""Updates all EveEntity objects in the database from ESI.
Returns:
Count of updated entities.
"""
return self.all().update_from_esi()
def resolve_name(self, id: int) -> str:
"""return the name for the given Eve entity ID
or an empty string if ID is not valid
"""
if id is not None:
obj, _ = self.get_or_create_esi(id=int(id))
if obj:
return obj.name
return ""
def bulk_resolve_names(self, ids: Iterable[int]) -> EveEntityNameResolver:
"""returns a map of IDs to names in a resolver object for given IDs
Args:
ids: List of valid EveEntity IDs
Returns:
EveEntityNameResolver object helpful for quick resolving a large amount
of IDs
"""
ids = set(map(int, ids))
self.bulk_create_esi(ids)
return EveEntityNameResolver(
{
row[0]: row[1]
for row in self.filter(id__in=ids).values_list("id", "name")
}
)
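# Illustrative helper (editor's sketch, not part of the original module):
# resolving a batch of entity IDs to names using only the manager and
# queryset methods defined above.
def _example_resolve_entity_names(ids):
    from .models import EveEntity  # local import to avoid circular imports

    # create/resolve any unknown or unnamed entities from ESI first ...
    EveEntity.objects.bulk_create_esi(ids)
    # ... then read the resolved names back from the database
    return dict(EveEntity.objects.filter(id__in=ids).values_list("id", "name"))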
class EveMarketPriceManager(models.Manager):
def update_from_esi(self, minutes_until_stale: int = None) -> int:
"""Updates market prices from ESI. Will only create new price objects for EveTypes that already exist in the database.
Args:
minutes_until_stale: only prices older than the given number of minutes are regarded as stale and will be updated. Uses the default (60) if not specified.
Returns:
Count of updated types
"""
from .models import EveType
if minutes_until_stale is None:
minutes_until_stale = self.model.DEFAULT_MINUTES_UNTIL_STALE
logger.info("Fetching market prices from ESI...")
entries = esi.client.Market.get_markets_prices().results()
if not entries:
return 0
entries_2 = {int(x["type_id"]): x for x in entries if "type_id" in x}
with transaction.atomic():
existing_types_ids = set(EveType.objects.values_list("id", flat=True))
relevant_prices_ids = set(entries_2.keys()).intersection(existing_types_ids)
deadline = now() - dt.timedelta(minutes=minutes_until_stale)
current_prices_ids = set(
self.filter(updated_at__gt=deadline).values_list(
"eve_type_id", flat=True
)
)
need_updating_ids = relevant_prices_ids.difference(current_prices_ids)
if not need_updating_ids:
logger.info("Market prices are up to date")
return 0
logger.info(
"Updating market prices for %s types...", len(need_updating_ids)
)
self.filter(eve_type_id__in=need_updating_ids).delete()
market_prices = [
self.model(
eve_type=get_or_create_esi_or_none("type_id", entry, EveType),
adjusted_price=entry.get("adjusted_price"),
average_price=entry.get("average_price"),
)
for type_id, entry in entries_2.items()
if type_id in need_updating_ids
]
self.bulk_create(
market_prices, batch_size=EVEUNIVERSE_BULK_METHODS_BATCH_SIZE
)
logger.info(
"Completed updating | |
<reponame>ostadabbas/PressureEye<filename>options/base_options.py<gh_stars>1-10
import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', default=r'/home/liu.shu/datasets/SLP/danaLab', help='path to images (should have subfolders trainA, trainB, valA, valB, etc). for PM point to danaLab directly')
parser.add_argument('--name', type=str, default='exp', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='vis2PM', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization | vis2PM ].')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='pm', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization | pm]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=60, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size, or the std input size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none], for pm [scale shift_rotate ], default will scale to 255, any combine will ran random transform')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
# v1 pwrs
parser.add_argument('--suffix', default='n_phy{n_phy}_stg{n_stg}_whtL-{type_whtL}-{lambda_L}{type_L}_lap{lambda_lap}_sum{lambda_sum}_ssim{lambda_ssim}_D{lambda_D}L{n_layers_D}', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}_phy-{phyMode}')
# data
parser.add_argument('--mod_src', nargs='+', default=['RGB'], help='source modality list, can accept multiple modalities typical model [RGB|IR|depthRaw| PMarray]')
parser.add_argument('--mod_tar', nargs='+', default=['PMarray'], help='target modality list')
parser.add_argument('--n_phy', type=int, default=1, help='how many physics parameters to pass in, 1 for weight only, the physique order changed to w,gen,h... in dataset interface already, we can choose 0,1, or 10 generally')
parser.add_argument('--depthPreproc', default='normalize',
help='depth preprocessing method [normalize | HHA | ...]: normalize to bed height and depth range (deprecated)')
parser.add_argument('--cov_li', nargs='+', default=['uncover'],
help='the cover condition list for data loading')
parser.add_argument('--pmDsProc', default='clip01',
help='how to process pmDs data, could be cliped to 0~1 or -1 to 1 [clip01|clip11]')
parser.add_argument('--if_align', default='w',
help='if align images or not')
# pm loss
parser.add_argument('--type_L', default='L2', help='loss type, [L1|L2], default MSE for regression, pwsr only l2 or pwsr')
parser.add_argument('--type_whtL', default='n', help='the mode for L loss weight [auto| n| pwrs], two modes right now, can be extended')
parser.add_argument('--whtScal', default=100, help='rescale normalized target value to weight L1 loss')
parser.add_argument('--h_mode', type=int, default=0, help='histogram mode: 0:image base, 1:global, 2:gau_filted')
parser.add_argument('--h_base', type=float, default=1.0, help='the histogram base, pseudo sample this much, the histogram base ')
parser.add_argument('--lambda_sum', type=float, default=0.0, help='coefficent for sum loss')
parser.add_argument('--lambda_L', type=float, default=100.0, help='weight for L loss, L1 or L2 shared')
parser.add_argument('--lambda_ssim', type=float, default=0.0, help='weight for ssim loss')
parser.add_argument('--lambda_D', type=float, default=0.0, help='weight for D loss, tune the GAN loss in G updating')
parser.add_argument('--lambda_lap', type=float, default=0.001, help='weight of the laplacian smoothing')
parser.add_argument('--kdeMode', type=int, default=0, help='kde mode add blurry to the ')
parser.add_argument('--sig_kde', type=float, default=1., help='kde guassian kernel sigma')
parser.add_argument('--if_phyMean', default='y', help='if use mean for weight equality, otherwise use sum')
# net structure
parser.add_argument('--n_stg', type=int, default=3, help='how many stages to build for visPM model ')
parser.add_argument('--if_actiFn', default='wo', help='choose [w|wo] for the final layer activation function, depends on the clip mode, it will give sigmoid or tanh for activation')
parser.add_argument('--phyMode', default='enc', help='physique parameters injection mode for enc, effect for vis2PM only, [concat|fc_gated | sim_gated | sfg|dcfg], fc_gated: use fc pointwise gating, work with n_gateLayers together. sim_gated: simple times the weight scalar directly, sfg simple final simple gated, just scale everything in image to save value dcfg1:final layer nn followed by decov, final one suppose to work with sigmoid model, enc: encoder to cat') # not very helpful , just concat is good. add additional layers.
parser.add_argument('--n_gateLayers', type=int, default=1, help='how many gate layers for the physique input, used for the fc_gated of phyMode')
parser.add_argument('--if_normPhy', type=str, default='wo', help='if normalize the physical vectors')
parser.add_argument('--if_posInit', type=str, default='w', help='if use the [w|wo] positive init operation for sfg layer only')
# testing specification
parser.add_argument('--if_test', type=str, default='n', help='if this is only for test, otherwise run the training session')
parser.add_argument('--n_testPM', type=int, default=12, help='how many subjects are used for test')
parser.add_argument('--rg_PCS', type=float, default=0.1, help='the PCS test range around center')
parser.add_argument('--pcs_test', type=float, default=0.05, help='PCS ratio for print out')
parser.add_argument('--efs_rt', type=float, default=0.05, help='the thresh for pressure needed to be taken into consideration')
parser.add_argument('--n_train', type=int, default=-1, help='the max number of training samples in each epoch, -1 for no restriction')
parser.add_argument('--num_test_in_train', type=int, default=50, help='how many test samples needed during training')
parser.add_argument('--num_test', type=int, default=-1, help='how many test images to run after training, -1 to run all')
parser.add_argument('--num_imgSv', type=int, default=500, help='how many images to save during final test')
parser.add_argument('--if_saveDiff', type=str, default='y', help='if save the diff result for PM test')
parser.add_argument('--if_saveImg', type=str, default='y', help='if save images for testPM session')
parser.add_argument('--if_saveWhtCmb', type=str, default='n', help='if save images for testPM session')
# --- from other places
parser.add_argument('--niter', type=int, default=25, help='# of iter (should be epoches) at starting learning rate')
parser.add_argument('--niter_decay', type=int, default=5, help='# of iter (epoches) to linearly decay learning rate to zero')
parser.add_argument('--predNm', default='test_diffV2.npz', help='the prediction output result. V2 is used to differ from original with diffferent content')
# parser.add_argument('--modSrc_li', nargs='+', help)
self.initialized = True
return parser
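# Illustrative usage sketch (editor's note, not part of the class): in
# practice a TrainOptions/TestOptions subclass sets self.isTrain and then
# drives the methods below, roughly:
#
#     options = BaseOptions()
#     options.isTrain = True          # assumption: normally set by a subclass
#     opt = options.gather_options()
#     options.print_options(opt)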
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized, next call second time, parser will not be available, wrong logic?
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name) # get modify option static function
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name) # add more options
parser = dataset_option_setter(parser, self.isTrain) # make checking complicated, put in one file is easier I think.
# save and return the parser
self.parser = parser
opt, _ = parser.parse_known_args()
# return parser.parse_args()
return opt
def print_options(self, opt):
"""Print and save options
It will print both | |
**SourcePath** *(string) --* The local absolute path of the volume resource on the host. The source path for a volume resource type cannot start with ''/sys''.
- **S3MachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon S3 machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **S3Uri** *(string) --* The URI of the source model in an S3 bucket. The model package must be in tar.gz or .zip format.
- **SageMakerMachineLearningModelResourceData** *(dict) --* Attributes that define an Amazon SageMaker machine learning resource.
- **DestinationPath** *(string) --* The absolute local path of the resource inside the Lambda environment.
- **SageMakerJobArn** *(string) --* The ARN of the Amazon SageMaker training job that represents the source model.
- **SecretsManagerSecretResourceData** *(dict) --* Attributes that define a secret resource, which references a secret from AWS Secrets Manager.
- **ARN** *(string) --* The ARN of the Secrets Manager secret to make available on the core. The value of the secret's latest version (represented by the ''AWSCURRENT'' staging label) is included by default.
- **AdditionalStagingLabelsToDownload** *(list) --* Optional. The staging labels whose values you want to make available on the core, in addition to ''AWSCURRENT''.
- *(string) --*
- **Id** *(string) --* The ID of the resource definition version.
- **Version** *(string) --* The version of the resource definition version.
:type ResourceDefinitionId: string
:param ResourceDefinitionId: **[REQUIRED]** The ID of the resource definition.
:type ResourceDefinitionVersionId: string
:param ResourceDefinitionVersionId: **[REQUIRED]** The ID of the resource definition version.
:rtype: dict
:returns:
"""
pass
def get_service_role_for_account(self) -> Dict:
"""
Retrieves the service role that is attached to your account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetServiceRoleForAccount>`_
**Request Syntax**
::
response = client.get_service_role_for_account()
**Response Syntax**
::
{
'AssociatedAt': 'string',
'RoleArn': 'string'
}
**Response Structure**
- *(dict) --* success
- **AssociatedAt** *(string) --* The time when the service role was associated with the account.
- **RoleArn** *(string) --* The ARN of the role which is associated with the account.
:rtype: dict
:returns:
"""
pass
def get_subscription_definition(self, SubscriptionDefinitionId: str) -> Dict:
"""
Retrieves information about a subscription definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetSubscriptionDefinition>`_
**Request Syntax**
::
response = client.get_subscription_definition(
SubscriptionDefinitionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string',
'tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
- **tags** *(dict) --* The tags for the definition.
- *(string) --*
- *(string) --*
:type SubscriptionDefinitionId: string
:param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
:rtype: dict
:returns:
"""
pass
def get_subscription_definition_version(self, SubscriptionDefinitionId: str, SubscriptionDefinitionVersionId: str, NextToken: str = None) -> Dict:
"""
Retrieves information about a subscription definition version.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetSubscriptionDefinitionVersion>`_
**Request Syntax**
::
response = client.get_subscription_definition_version(
NextToken='string',
SubscriptionDefinitionId='string',
SubscriptionDefinitionVersionId='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Definition': {
'Subscriptions': [
{
'Id': 'string',
'Source': 'string',
'Subject': 'string',
'Target': 'string'
},
]
},
'Id': 'string',
'NextToken': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the subscription definition version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the subscription definition version was created.
- **Definition** *(dict) --* Information about the subscription definition version.
- **Subscriptions** *(list) --* A list of subscriptions.
- *(dict) --* Information about a subscription.
- **Id** *(string) --* A descriptive or arbitrary ID for the subscription. This value must be unique within the subscription definition version. Max length is 128 characters with pattern ''[a-zA-Z0-9:_-]+''.
- **Source** *(string) --* The source of the subscription. Can be a thing ARN, a Lambda function ARN, a connector ARN, 'cloud' (which represents the AWS IoT cloud), or 'GGShadowService'.
- **Subject** *(string) --* The MQTT topic used to route the message.
- **Target** *(string) --* Where the message is sent to. Can be a thing ARN, a Lambda function ARN, a connector ARN, 'cloud' (which represents the AWS IoT cloud), or 'GGShadowService'.
- **Id** *(string) --* The ID of the subscription definition version.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
- **Version** *(string) --* The version of the subscription definition version.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:type SubscriptionDefinitionId: string
:param SubscriptionDefinitionId: **[REQUIRED]** The ID of the subscription definition.
:type SubscriptionDefinitionVersionId: string
:param SubscriptionDefinitionVersionId: **[REQUIRED]** The ID of the subscription definition version.
:rtype: dict
:returns:
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_bulk_deployment_detailed_reports(self, BulkDeploymentId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Gets a paginated list of the deployments that have been started in a bulk deployment operation, and their current deployment status.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListBulkDeploymentDetailedReports>`_
**Request Syntax**
::
response = client.list_bulk_deployment_detailed_reports(
BulkDeploymentId='string',
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'Deployments': [
{
'CreatedAt': 'string',
'DeploymentArn': 'string',
'DeploymentId': 'string',
'DeploymentStatus': 'string',
'DeploymentType': 'NewDeployment'|'Redeployment'|'ResetDeployment'|'ForceResetDeployment',
'ErrorDetails': [
{
'DetailedErrorCode': 'string',
'DetailedErrorMessage': 'string'
},
],
'ErrorMessage': 'string',
'GroupArn': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --* Success. The response body contains the list of deployments for the given group.
- **Deployments** *(list) --* A list of the individual group deployments in the bulk deployment operation.
- *(dict) --* Information about an individual group deployment in a bulk deployment operation.
- **CreatedAt** *(string) --* The time, in ISO format, when the deployment was created.
- **DeploymentArn** *(string) --* The ARN of the group deployment.
- **DeploymentId** *(string) --* The ID of the group deployment.
- **DeploymentStatus** *(string) --* The current status of the group deployment: ''InProgress'', ''Building'', ''Success'', or ''Failure''.
- **DeploymentType** *(string) --* The type of the deployment.
- **ErrorDetails** *(list) --* Details about the error.
- *(dict) --* Details about the error.
- **DetailedErrorCode** *(string) --* A detailed error code.
- **DetailedErrorMessage** *(string) --* A detailed error message.
- **ErrorMessage** *(string) --* The error message for a failed deployment
- **GroupArn** *(string) --* The ARN of the Greengrass group.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type BulkDeploymentId: string
:param BulkDeploymentId: **[REQUIRED]** The ID of the bulk deployment.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
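# Illustrative pagination sketch (editor's note, not part of the generated
# stub): the list operations in this client return a NextToken while further
# pages remain, so callers typically loop until no token comes back
# (client here is assumed to be the Greengrass client object):
#
#     token = None
#     while True:
#         kwargs = {'MaxResults': '50'}
#         if token:
#             kwargs['NextToken'] = token
#         page = client.list_bulk_deployments(**kwargs)
#         for item in page.get('BulkDeployments', []):
#             ...  # process each bulk deployment summary
#         token = page.get('NextToken')
#         if not token:
#             break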
def list_bulk_deployments(self, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Returns a list of bulk deployments.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListBulkDeployments>`_
**Request Syntax**
::
response = client.list_bulk_deployments(
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'BulkDeployments': [
{
'BulkDeploymentArn': 'string',
'BulkDeploymentId': 'string',
'CreatedAt': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --* Success. The response body contains the list of bulk | |
<gh_stars>0
import mmcv
import torch
from mmdet.core import bbox2roi, build_assigner, build_sampler
from mmdet.models.dense_heads import (AnchorHead, CornerHead, FCOSHead,
FSAFHead, GuidedAnchorHead,
SOLOHead)
from mmdet.models.roi_heads.bbox_heads import BBoxHead
from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead
def test_solo_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = SOLOHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss',
use_sigmoid=True,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
train_cfg=train_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(mask_preds, cls_preds, gt_bboxes, gt_labels,
gt_masks, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
one_gt_losses = self.loss(mask_preds, cls_preds, gt_bboxes, gt_labels,
gt_masks, img_metas, gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
def test_fcos_head_loss():
"""Tests fcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = FCOSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centerness = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_fsaf_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = dict(
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'))
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = FSAFHead(num_classes=4, in_channels=1, train_cfg=train_cfg, **cfg)
if torch.cuda.is_available():
head.cuda()
    # The FSAF head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.anchor_generator.strides))
]
cls_scores, bbox_preds = head.forward(feat)
gt_bboxes_ignore = None
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
# Test that empty ground truth encourages the network to predict bkg
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
def test_ga_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5,
pos_weight=-1,
debug=False))
head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
    # The guided anchor head expects multiple levels of features per image
if torch.cuda.is_available():
head.cuda()
feat = [
torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = BBoxHead(in_channels=8, roi_feat_size=3)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
    NUMBER  NAME                      FROM*   ARGUMENTS                     RESPONSE      NOTE
100 series: program execution
    101     RUN                       JAVA    -                             -
    102     LIST_THREADS              JAVA                                  RETURN with XML listing of all threads
    103     THREAD_CREATE             PYDB    -                             XML with thread information
    104     THREAD_KILL               JAVA    id (or * to exit)             kills the thread
                                      PYDB    id                            notifies JAVA that thread was killed
    105     THREAD_SUSPEND            JAVA    XML of the stack,             suspends the thread
                                              reason for suspension
                                      PYDB    id                            notifies JAVA that thread was suspended
    106     CMD_THREAD_RUN            JAVA    id                            resume the thread
                                      PYDB    id \t reason                  notifies JAVA that thread was resumed
    107     STEP_INTO                 JAVA    thread_id
    108     STEP_OVER                 JAVA    thread_id
    109     STEP_RETURN               JAVA    thread_id
    110     GET_VARIABLE              JAVA    thread_id \t frame_id \t      GET_VARIABLE with XML of var content
                                              FRAME|GLOBAL \t attributes*
    111     SET_BREAK                 JAVA    file/line of the breakpoint
    112     REMOVE_BREAK              JAVA    file/line of the return
    113     CMD_EVALUATE_EXPRESSION   JAVA    expression                    result of evaluating the expression
    114     CMD_GET_FRAME             JAVA                                  request for frame contents
    115     CMD_EXEC_EXPRESSION       JAVA
    116     CMD_WRITE_TO_CONSOLE      PYDB
    117     CMD_CHANGE_VARIABLE
    118     CMD_RUN_TO_LINE
    119     CMD_RELOAD_CODE
    120     CMD_GET_COMPLETIONS       JAVA
500 series diagnostics/ok
    501     VERSION                   either  Version string (1.0)          Currently just used at startup
    502     RETURN                    either  Depends on caller             -
900 series: errors
    901     ERROR                     either  -                             This is reserved for unexpected errors.
    * JAVA - remote debugger, the java end
    * PYDB - pydevd, the python end
'''
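# --- illustrative sketch (not part of pydevd itself): the wire format described
# above is one line per command, "id\tsequence-num\ttext". The two helper names
# below are hypothetical and only make that layout concrete; the real encoding
# and dispatch are done by the NetCommand classes and ReaderThread further down.
def _example_make_net_command(cmd_id, seq, text):
    # e.g. _example_make_net_command(101, 1, '') -> '101\t1\t\n'  (CMD_RUN)
    return '%s\t%s\t%s\n' % (cmd_id, seq, text)
def _example_parse_net_command(line):
    # inverse of the above; sequence numbers from the debugger are odd,
    # sequence numbers originating at the daemon are even
    cmd_id, seq, text = line.rstrip('\n').split('\t', 2)
    return int(cmd_id), int(seq), text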
from pydevd_constants import * #@UnusedWildImport
from _pydev_imps import _pydev_time as time, _pydev_thread
import _pydev_threading as threading
from _pydev_imps._pydev_socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR
from pydev_imports import _queue
try:
from urllib import quote, quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
import pydevd_vars
import pydevd_tracing
import pydevd_vm_type
import pydevd_file_utils
import traceback
from pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
import pydev_log
import _pydev_completer
from pydevd_tracing import GetExceptionTracebackStr
import pydevd_console
from pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
ID_TO_MEANING = {
'101':'CMD_RUN',
'102':'CMD_LIST_THREADS',
'103':'CMD_THREAD_CREATE',
'104':'CMD_THREAD_KILL',
'105':'CMD_THREAD_SUSPEND',
'106':'CMD_THREAD_RUN',
'107':'CMD_STEP_INTO',
'108':'CMD_STEP_OVER',
'109':'CMD_STEP_RETURN',
'110':'CMD_GET_VARIABLE',
'111':'CMD_SET_BREAK',
'112':'CMD_REMOVE_BREAK',
'113':'CMD_EVALUATE_EXPRESSION',
'114':'CMD_GET_FRAME',
'115':'CMD_EXEC_EXPRESSION',
'116':'CMD_WRITE_TO_CONSOLE',
'117':'CMD_CHANGE_VARIABLE',
'118':'CMD_RUN_TO_LINE',
'119':'CMD_RELOAD_CODE',
'120':'CMD_GET_COMPLETIONS',
'121':'CMD_CONSOLE_EXEC',
'122':'CMD_ADD_EXCEPTION_BREAK',
'123':'CMD_REMOVE_EXCEPTION_BREAK',
'124':'CMD_LOAD_SOURCE',
'125':'CMD_ADD_DJANGO_EXCEPTION_BREAK',
'126':'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
'127':'CMD_SET_NEXT_STATEMENT',
'128':'CMD_SMART_STEP_INTO',
'129': 'CMD_EXIT',
'130': 'CMD_SIGNATURE_CALL_TRACE',
'131': 'CMD_SET_PY_EXCEPTION',
'132': 'CMD_GET_FILE_CONTENTS',
'133': 'CMD_SET_PROPERTY_TRACE',
'134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
'135': 'CMD_RUN_CUSTOM_OPERATION',
'136': 'CMD_GET_BREAKPOINT_EXCEPTION',
'137': 'CMD_STEP_CAUGHT_EXCEPTION',
'138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
'139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
'140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
    '141': 'CMD_ENABLE_DONT_TRACE',
    '142': 'CMD_SHOW_CONSOLE',
    '143': 'CMD_GET_ARRAY',
    '144': 'CMD_STEP_INTO_MY_CODE',
    '501': 'CMD_VERSION',
    '502': 'CMD_RETURN',
    '901': 'CMD_ERROR',
}
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# PydevdLog
#=======================================================================================================================
def PydevdLog(level, *args):
""" levels are:
0 most serious warnings/errors
1 warnings/significant events
2 informational trace
"""
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
#yes, we can have errors printing if the console of the program has been finished (and we're still trying to print something)
try:
sys.stderr.write('%s\n' % (args,))
except:
pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
'''
Holder for the global debugger.
'''
globalDbg = None
#=======================================================================================================================
# GetGlobalDebugger
#=======================================================================================================================
def GetGlobalDebugger():
return GlobalDebuggerHolder.globalDbg
#=======================================================================================================================
# SetGlobalDebugger
#=======================================================================================================================
def SetGlobalDebugger(dbg):
GlobalDebuggerHolder.globalDbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
created_pydb_daemon_threads = {}
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.killReceived = False
self.dontTraceMe = True
self.is_pydev_daemon_thread = True
def run(self):
created_pydb_daemon = self.created_pydb_daemon_threads
created_pydb_daemon[self] = 1
try:
try:
if IS_JYTHON:
import org.python.core as PyCore #@UnresolvedImport
ss = PyCore.PySystemState()
# Note: Py.setSystemState() affects only the current thread.
PyCore.Py.setSystemState(ss)
self.OnRun()
except:
if sys is not None and traceback is not None:
traceback.print_exc()
finally:
del created_pydb_daemon[self]
def OnRun(self):
raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
def doKillPydevThread(self):
#that was not working very well because jython gave some socket errors
self.killReceived = True
def stopTrace(self):
if self.dontTraceMe:
disable_tracing = True
if pydevd_vm_type.GetVmType() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
# don't run untraced threads if we're in jython 2.2.1 or lower
# jython bug: if we start a thread and another thread changes the tracing facility
# it affects other threads (it's not set only for the thread but globally)
# Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
disable_tracing = False
if disable_tracing:
pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Reader")
def doKillPydevThread(self):
#We must close the socket so that it doesn't stay halted there.
self.killReceived = True
try:
self.sock.shutdown(SHUT_RD) #shutdown the socket for read
except:
#just ignore that
pass
def OnRun(self):
self.stopTrace()
buffer = ""
try:
while not self.killReceived:
try:
r = self.sock.recv(1024)
except:
if not self.killReceived:
self.handleExcept()
return #Finished communication.
#Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
#internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
#on python 2 may need to be converted to the filesystem encoding).
if hasattr(r, 'decode'):
r = r.decode('utf-8')
buffer += r
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
pydev_log.debug('received >>%s<<\n' % (buffer,))
if len(buffer) == 0:
self.handleExcept()
break
while buffer.find('\n') != -1:
command, buffer = buffer.split('\n', 1)
args = command.split('\t', 2)
try:
cmd_id = int(args[0])
pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
self.processCommand(cmd_id, int(args[1]), args[2])
except:
traceback.print_exc()
sys.stderr.write("Can't process net command: %s\n" % command)
sys.stderr.flush()
except:
traceback.print_exc()
self.handleExcept()
def handleExcept(self):
GlobalDebuggerHolder.globalDbg.FinishDebuggingSession()
def processCommand(self, cmd_id, seq, text):
GlobalDebuggerHolder.globalDbg.processNetCommand(cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Writer")
self.cmdQueue = _queue.Queue()
if pydevd_vm_type.GetVmType() == 'python':
self.timeout = 0
else:
self.timeout = 0.1
def addCommand(self, cmd):
""" cmd is NetCommand """
if not self.killReceived: #we don't take new data after everybody die
self.cmdQueue.put(cmd)
def OnRun(self):
""" just loop and write responses """
self.stopTrace()
get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
try:
while True:
try:
try:
if get_has_timeout:
cmd = self.cmdQueue.get(1, 0.1)
else:
time.sleep(.01)
cmd = self.cmdQueue.get(0)
except _queue.Empty:
if self.killReceived:
try:
self.sock.shutdown(SHUT_WR)
self.sock.close()
except:
pass
return #break if queue is empty and killReceived
else:
continue
except:
#PydevdLog(0, 'Finishing debug communication...(1)')
#when liberating the thread here, we could have errors because we were shutting down
#but the thread was still not liberated
return
out = cmd.getOutgoing()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
out_message = 'sending cmd --> '
out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
out_message += ' '
out_message += unquote(unquote(out)).replace('\n', ' ')
try:
sys.stderr.write('%s\n' % (out_message,))
except:
pass
if IS_PY3K:
out = bytearray(out, 'utf-8')
self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all)
if cmd.id == CMD_EXIT:
break
if time is None:
break #interpreter shutdown
time.sleep(self.timeout)
except Exception:
GlobalDebuggerHolder.globalDbg.FinishDebuggingSession()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
traceback.print_exc()
def empty(self):
return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# StartServer
#=======================================================================================================================
def StartServer(port):
""" binds to a port, waits for the debugger to connect """
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
newSock, _addr = s.accept()
return newSock
#=======================================================================================================================
# StartClient
#=======================================================================================================================
def StartClient(host, port):
""" connects to a host/port """
PydevdLog(1, "Connecting to ", host, ":", str(port))
s = socket(AF_INET, SOCK_STREAM)
MAX_TRIES = 100
i = 0
while i<MAX_TRIES:
try:
s.connect((host, port))
except:
# NURBSLib_EVM
# (c) <NAME> 2016-2017
# <EMAIL>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
## Basic NURBS rules and terms
## Order >= 2
## Order: 2 = line, 3 = quadratic, 4 = cubic ...
## Degree = Order - 1
## Order = Degree + 1
## nPoles >= Order
## nPoles >= Degree + 1
## nKnots = nPoles + Order
## nKnots = nPoles + degree + 1
## knot vector strictly ascending
## Pinned knot vector: k=Order, first k knots are equal, last k knots are equal
####
#### SECTION 1: DIRECT FUNCTIONS - NO PARAMETRIC LINKING BETWEEN OBJECTS - LEGACY
#### SECTION 2: PYTHON FEATURE CLASSES - PARAMETRIC LINKING BETWEEN OBJECTS - IN PROGRESS (start around line 1150)
####
### SECTION 1: DIRECT FUNCTIONS - NO PARAMETRIC LINKING BETWEEN OBJECTS - LEGACY - will be kept in place until it is thoroughly picked over.
# Bottom up view:
# poles = 3D points with weights, as [[x,y,z],w], or [x,y,z] (these are leftovers waiting to receive weights).
## These are the basic input data for all that follows. They are obtained from the FreeCAD functions .getPoles() and .getWeights()
## NOTE: Poles in FreeCAD, such as returned by .getPoles(), refer only to xyz coordinates of a control point, THROUGHOUT the following functions, pole means [[x,y,z],w]
## lists are probably not efficient, but until FreeCAD has fully integrated homogeneous coordinates for all NURBS functions, this is easier for me :)
## right now, the computation of my scripts is ridiculously fast compared to the time taken to generate the surfaces using the FreeCAD Part.BSplineSurface() function
# Bezier_Cubic_curve([pole X 4]) - pinned cubic rational B spline - Part.BSplineCurve() in cubic bezier form
# NURBS_Cubic_6P_curve([pole X 6]) - pinned cubic rational Bspline - 6 control points, just enough to have independent endpoint curvature
# Cubic_Bezier_ddu(poles1, pole2) - cubic derivative at curve start (pole1) based on first two poles (no curve required). Weights not included yet
# Cubic_6P_ddu(poles1, pole2) - cubic derivative at curve start (pole1) based on first two poles (no curve required). Weights not included yet
# Cubic_Bezier_d2du2(poles1, pole2, pole3) - cubic second derivative at curve start (pole1) based on first three poles (no curve required). Weights not included yet
# Cubic_6P_d2du2(poles1, pole2, pole3) - cubic second derivative at curve start (pole1) based on first three poles (no curve required). Weights not included yet
# Cubic_Bezier_curvature(poles1, pole2, pole3) - curvature at curve start (pole1) based on the first three poles (no curve required). Weights not included yet
# Cubic_6P_curvature(poles1, pole2, pole3) - curvature at curve start (pole1) based on the first three poles (no curve required). Weights not included yet
# orient_a_to_b(polesa,polesb) - polesa and polesb are lists of poles that share one endpoint. if needed, this function reorders a so that a.end = b.start (or b.end); b is never modified
# grid_44_quad(c1,c2,c3,c4) - given four curves of 4 poles each that form a closed loop, prepare a 4*4 nurbs control grid
# grid_44_tri(c1,c2,c3) - given three curves of 4 poles each that form a closed loop, prepare a 4 x 4 control grid.
##this is a singular/degenerate patch. intersection of first and last curve is the singular point/edge
# grid_44_tri_alt(c1,c2,c3) - given three curves of 4 poles each that form a closed loop, prepare a 4 x 4 control grid.
##this is a singular/degenerate patch. intersection of first and last curve is the singular point/edge
# grid_66_quad(c1,c2,c3,c4) - given four curves of 6 poles each that form a closed loop, prepare a 6*6 nurbs control grid. Curve weights not assimilated yet
# grid_64_quad(c1,c2,c3,c4) - c1 and c3 are 6P curves, c2 and c4 are Bezier curves. Prepares 6*4 NURBS control grid
# poly_grid_44(grid_44) - given a 4 X 4 control patch, show the internal control mesh
# Bezier_Bicubic_surf(grid_44) - given a 4 x 4 control patch, build the bicubic bezier surface from a Part.BSplineSurface() !NOT! a Part.BezierSurface()
# NURBS_Cubic_66_surf(grid_66) - given a 6 x 6 control patch, build the bicubic bezier surface from a Part.BSplineSurface().
# isect_test(curve, surf, u)
# isect_curve_surf(curve, surf)
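# --- illustrative sketch (not part of the original library): a small helper that
# restates the pinned-knot rules listed above (nKnots = nPoles + degree + 1,
# first and last Order knots equal). It reproduces the knot lists hard-coded in
# Bezier_Cubic_curve() and NURBS_Cubic_6P_curve() below (the latter uses the
# rounded values 0.3333/0.6666 instead of exact thirds). Remember that poles
# throughout this file are [[x, y, z], w] pairs, e.g. [[0.0, 0.0, 0.0], 1.0].
def _example_pinned_knot_vector(nPoles, degree):
    order = degree + 1                                    # Order = Degree + 1
    n_interior = nPoles - order                           # knots between the pins
    interior = [float(i + 1) / (n_interior + 1) for i in range(n_interior)]
    knots = [0.0] * order + interior + [1.0] * order
    assert len(knots) == nPoles + degree + 1              # nKnots = nPoles + Order
    return knots
# _example_pinned_knot_vector(4, 3) -> [0, 0, 0, 0, 1, 1, 1, 1]            (Bezier cubic)
# _example_pinned_knot_vector(6, 3) -> [0, 0, 0, 0, 1/3, 2/3, 1, 1, 1, 1]  (6P cubic)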
import Part
import FreeCAD
from FreeCAD import Base
from FreeCAD import Gui
import math
import numpy as np
def Bezier_Cubic_curve(poles):
#draws a degree 3 rational bspline from first to last point,
# second and third act as tangents
# poles is a list: [[[x,y,z],w],[[x,y,z],w],[[x,y,z],w],[[x,y,z],w]]
## nKnot = 4 + 3 +1 = 8
## Order = 3 + 1 = 4
degree=3
nPoles=4
knot=[0,0,0,0,1,1,1,1]
bs=Part.BSplineCurve()
bs.increaseDegree(degree)
id=1
for i in range(0,len(knot)): #-1):
bs.insertKnot(knot[i],id,0.0000001)
i=0
for ii in range(0,nPoles):
bs.setPole(ii+1,poles[i][0],poles[i][1])
i=i+1;
return bs
def NURBS_Cubic_6P_curve(poles):
# draws a degree 3 rational bspline from first to last point,
# second and third act as tangents
# poles is a list: [[[x,y,z],w],[[x,y,z],w],[[x,y,z],w],[[x,y,z],w],[[x,y,z],w],[[x,y,z],w]]
## nKnot = 6 + 3 +1 = 10
## Order = 3 + 1 = 4
degree=3
nPoles=6
knot=[0,0,0,0,0.3333,0.6666,1,1,1,1]
bs=Part.BSplineCurve()
bs.increaseDegree(degree)
id=1
for i in range(0,len(knot)): #-1):
bs.insertKnot(knot[i],id,0.0000001)
i=0
for ii in range(0,nPoles):
bs.setPole(ii+1,poles[i][0],poles[i][1])
i=i+1;
return bs
def Cubic_Bezier_ddu(pole1, pole2): # first derivative with respect to parameter, returns value at first pole given. weights not included!
P1=Base.Vector(pole1)
P2=Base.Vector(pole2)
Cubic_Bezier_ddu = (P2 - P1)*3
return Cubic_Bezier_ddu
def Cubic_6P_ddu(pole1, pole2): # first derivative with respect to parameter, returns value at first pole given. weights not included!
P1=Base.Vector(pole1)
P2=Base.Vector(pole2)
Cubic_6P_ddu = (P2 - P1)*9
return Cubic_6P_ddu
def Cubic_Bezier_d2du2(pole1, pole2, pole3): # second derivative with respect to parameter, returns value at first pole given. weights not included!
P1=Base.Vector(pole1)
P2=Base.Vector(pole2)
P3=Base.Vector(pole3)
Cubic_Bezier_d2du2 = (P1- P2*2 + P3)*6
return Cubic_Bezier_d2du2
def Cubic_6P_d2du2(pole1, pole2, pole3): # second derivative with respect to parameter, returns value at first pole given. weights not included!
P1=Base.Vector(pole1)
P2=Base.Vector(pole2)
P3=Base.Vector(pole3)
Cubic_6P_d2du2 = (P1*2- P2*3 + P3)*27
return Cubic_6P_d2du2
def Cubic_Bezier_curvature(pole1, pole2, pole3): # curvature, returns value at first pole given. weights not included!
ddu = Cubic_Bezier_ddu(pole1, pole2)
d2du2 = Cubic_Bezier_d2du2(pole1, pole2, pole3)
Cubic_Bezier_curvature = ddu.cross(d2du2).Length/ddu.Length.__pow__(3)
return Cubic_Bezier_curvature
def Cubic_6P_curvature(pole1, pole2, pole3): # curvature, returns value at first pole given. weights not included!
ddu = Cubic_6P_ddu(pole1, pole2)
d2du2 = Cubic_6P_d2du2(pole1, pole2, pole3)
Cubic_6P_curvature = ddu.cross(d2du2).Length/ddu.Length.__pow__(3)
return Cubic_6P_curvature
def equalVectors(vector0,vector1,tol):
if (vector1-vector0).Length <= tol:
return 1
    else:
        return 0
def orient_a_to_b(polesa,polesb):
if equalVectors(polesa[-1],polesb[0],0.000001): # last point of first curve is first point of second curve
# curve 1 is oriented properly
return polesa
elif equalVectors(polesa[-1],polesb[-1],0.000001): # last point of first curve is last point of second curve
# curve 1 is oriented properly
return polesa
elif equalVectors(polesa[0],polesb[0],0.000001): # first point of first curve is first point of second curve
# curve 1 is reversed
return polesa[::-1]
elif equalVectors(polesa[0],polesb[-1],0.000001): # first point of first curve is last point of second curve
# curve 1 is reversed
return polesa[::-1]
else:
        print('curves do not share endpoints')
return 0
def grid_44_quad(c1,c2,c3,c4): # prepare 4 x 4 control point patch from four curves
# extract curve poles
poles1=c1.getPoles()
poles2=c2.getPoles()
poles3=c3.getPoles()
poles4=c4.getPoles()
weights1=c1.getWeights()
weights2=c2.getWeights()
weights3=c3.getWeights()
weights4=c4.getWeights()
# fix edge orientations, going counterclockwise from first curve (c1)
quad_1_2 = orient_a_to_b(poles1,poles2)
quad_2_3 = orient_a_to_b(poles2,poles3)
quad_3_4 = orient_a_to_b(poles3,poles4)
quad_4_1 = orient_a_to_b(poles4,poles1)
# flip weights of flipped edges - maybe this should go into 'orient_a_to_b()'
if quad_1_2[0]!=poles1[0] and quad_1_2[0]==poles1[-1]:
weights1=weights1[::-1]
if quad_2_3[0]!=poles2[0] and quad_2_3[0]==poles2[-1]:
weights2=weights2[::-1]
if quad_3_4[0]!=poles3[0] and quad_3_4[0]==poles3[-1]:
weights3=weights3[::-1]
if quad_4_1[0]!=poles4[0] and quad_4_1[0]==poles4[-1]:
weights4=weights4[::-1]
# bottom edge, left to right
p_0_0 = quad_1_2[0]
p_0_1 = quad_1_2[1]
p_0_2 = quad_1_2[2]
p_0_3 = quad_1_2[3]
# right edge, bottom to top, SKIP starting corner
p_1_3 = quad_2_3[1]
p_2_3 = quad_2_3[2]
p_3_3 = quad_2_3[3]
# top edge, right to left, SKIP starting corner
p_3_2 = quad_3_4[1]
p_3_1 = quad_3_4[2]
p_3_0 = quad_3_4[3]
# left edge, top to bottom, SKIP both corners
p_2_0 = quad_4_1[1]
p_1_0 = quad_4_1[2]
# calculate inner control points. this method makes continuous patches, but all corner grids make parallelograms. need to improve.
# needs to stay planar across the patch corners!
p_1_1 = p_0_0 + (p_0_1 - p_0_0) + (p_1_0 - p_0_0)
p_1_2 = p_0_3 + (p_0_2 - p_0_3) + (p_1_3 - p_0_3)
p_2_1 = p_3_0 + (p_3_1 - p_3_0) + (p_2_0 - p_3_0)
p_2_2 = p_3_3 + (p_2_3 - p_3_3) + (p_3_2 - p_3_3)
# set weights, assign to control points, final format is [[x,y,z],w]. The patch will be a 4x4 matrix arranged as a list of 16 of these control format.
# original edges
w00 = [p_0_0, weights1[0]]
w01 = [p_0_1, weights1[1]]
w02 = [p_0_2, weights1[2]]
w03 = [p_0_3, weights1[3]]
    w13 = [p_1_3, weights2[1]]
    def get_CacheDirectory(self, conf, section = "Service"):
return self.expand_special(conf.get(section, "CacheDirectory", ""), conf)
def get_LogsDirectory(self, conf, section = "Service"):
return self.expand_special(conf.get(section, "LogsDirectory", ""), conf)
def get_ConfigurationDirectory(self, conf, section = "Service"):
return self.expand_special(conf.get(section, "ConfigurationDirectory", ""), conf)
def get_RuntimeDirectoryMode(self, conf, section = "Service"):
return conf.get(section, "RuntimeDirectoryMode", "")
def get_StateDirectoryMode(self, conf, section = "Service"):
return conf.get(section, "StateDirectoryMode", "")
def get_CacheDirectoryMode(self, conf, section = "Service"):
return conf.get(section, "CacheDirectoryMode", "")
def get_LogsDirectoryMode(self, conf, section = "Service"):
return conf.get(section, "LogsDirectoryMode", "")
def get_ConfigurationDirectoryMode(self, conf, section = "Service"):
return conf.get(section, "ConfigurationDirectoryMode", "")
def clean_service_directories(self, conf, which = ""):
ok = True
section = self.get_unit_section_from(conf)
nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section)
nameStateDirectory = self.get_StateDirectory(conf, section)
nameCacheDirectory = self.get_CacheDirectory(conf, section)
nameLogsDirectory = self.get_LogsDirectory(conf, section)
nameConfigurationDirectory = self.get_ConfigurationDirectory(conf, section)
root = conf.root_mode()
for name in nameRuntimeDirectory.split(" "):
if not name.strip(): continue
RUN = get_RUNTIME_DIR(root)
path = os.path.join(RUN, name)
if which in ["all", "runtime", ""]:
dirpath = os_path(self._root, path)
ok = self.do_rm_tree(dirpath) and ok
if RUN == "/run":
for var_run in ("/var/run", "/tmp/run"):
var_path = os.path.join(var_run, name)
var_dirpath = os_path(self._root, var_path)
self.do_rm_tree(var_dirpath)
for name in nameStateDirectory.split(" "):
if not name.strip(): continue
DAT = get_VARLIB_HOME(root)
path = os.path.join(DAT, name)
if which in ["all", "state"]:
dirpath = os_path(self._root, path)
ok = self.do_rm_tree(dirpath) and ok
for name in nameCacheDirectory.split(" "):
if not name.strip(): continue
CACHE = get_CACHE_HOME(root)
path = os.path.join(CACHE, name)
if which in ["all", "cache", ""]:
dirpath = os_path(self._root, path)
ok = self.do_rm_tree(dirpath) and ok
for name in nameLogsDirectory.split(" "):
if not name.strip(): continue
LOGS = get_LOG_DIR(root)
path = os.path.join(LOGS, name)
if which in ["all", "logs"]:
dirpath = os_path(self._root, path)
ok = self.do_rm_tree(dirpath) and ok
for name in nameConfigurationDirectory.split(" "):
if not name.strip(): continue
CONFIG = get_CONFIG_HOME(root)
path = os.path.join(CONFIG, name)
if which in ["all", "configuration", ""]:
dirpath = os_path(self._root, path)
ok = self.do_rm_tree(dirpath) and ok
return ok
def env_service_directories(self, conf):
envs = {}
section = self.get_unit_section_from(conf)
nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section)
nameStateDirectory = self.get_StateDirectory(conf, section)
nameCacheDirectory = self.get_CacheDirectory(conf, section)
nameLogsDirectory = self.get_LogsDirectory(conf, section)
nameConfigurationDirectory = self.get_ConfigurationDirectory(conf, section)
root = conf.root_mode()
for name in nameRuntimeDirectory.split(" "):
if not name.strip(): continue
RUN = get_RUNTIME_DIR(root)
path = os.path.join(RUN, name)
envs["RUNTIME_DIRECTORY"] = path
for name in nameStateDirectory.split(" "):
if not name.strip(): continue
DAT = get_VARLIB_HOME(root)
path = os.path.join(DAT, name)
envs["STATE_DIRECTORY"] = path
for name in nameCacheDirectory.split(" "):
if not name.strip(): continue
CACHE = get_CACHE_HOME(root)
path = os.path.join(CACHE, name)
envs["CACHE_DIRECTORY"] = path
for name in nameLogsDirectory.split(" "):
if not name.strip(): continue
LOGS = get_LOG_DIR(root)
path = os.path.join(LOGS, name)
envs["LOGS_DIRECTORY"] = path
for name in nameConfigurationDirectory.split(" "):
if not name.strip(): continue
CONFIG = get_CONFIG_HOME(root)
path = os.path.join(CONFIG, name)
envs["CONFIGURATION_DIRECTORY"] = path
return envs
def create_service_directories(self, conf):
envs = {}
section = self.get_unit_section_from(conf)
nameRuntimeDirectory = self.get_RuntimeDirectory(conf, section)
modeRuntimeDirectory = self.get_RuntimeDirectoryMode(conf, section)
nameStateDirectory = self.get_StateDirectory(conf, section)
modeStateDirectory = self.get_StateDirectoryMode(conf, section)
nameCacheDirectory = self.get_CacheDirectory(conf, section)
modeCacheDirectory = self.get_CacheDirectoryMode(conf, section)
nameLogsDirectory = self.get_LogsDirectory(conf, section)
modeLogsDirectory = self.get_LogsDirectoryMode(conf, section)
nameConfigurationDirectory = self.get_ConfigurationDirectory(conf, section)
modeConfigurationDirectory = self.get_ConfigurationDirectoryMode(conf, section)
root = conf.root_mode()
user = self.get_User(conf)
group = self.get_Group(conf)
for name in nameRuntimeDirectory.split(" "):
if not name.strip(): continue
RUN = get_RUNTIME_DIR(root)
path = os.path.join(RUN, name)
logg.debug("RuntimeDirectory %s", path)
self.make_service_directory(path, modeRuntimeDirectory)
self.chown_service_directory(path, user, group)
envs["RUNTIME_DIRECTORY"] = path
if RUN == "/run":
for var_run in ("/var/run", "/tmp/run"):
if os.path.isdir(var_run):
var_path = os.path.join(var_run, name)
var_dirpath = os_path(self._root, var_path)
if os.path.isdir(var_dirpath):
if not os.path.islink(var_dirpath):
logg.debug("not a symlink: %s", var_dirpath)
continue
dirpath = os_path(self._root, path)
basepath = os.path.dirname(var_dirpath)
if not os.path.isdir(basepath):
os.makedirs(basepath)
try:
os.symlink(dirpath, var_dirpath)
except Exception as e:
logg.debug("var symlink %s\n\t%s", var_dirpath, e)
for name in nameStateDirectory.split(" "):
if not name.strip(): continue
DAT = get_VARLIB_HOME(root)
path = os.path.join(DAT, name)
logg.debug("StateDirectory %s", path)
self.make_service_directory(path, modeStateDirectory)
self.chown_service_directory(path, user, group)
envs["STATE_DIRECTORY"] = path
for name in nameCacheDirectory.split(" "):
if not name.strip(): continue
CACHE = get_CACHE_HOME(root)
path = os.path.join(CACHE, name)
logg.debug("CacheDirectory %s", path)
self.make_service_directory(path, modeCacheDirectory)
self.chown_service_directory(path, user, group)
envs["CACHE_DIRECTORY"] = path
for name in nameLogsDirectory.split(" "):
if not name.strip(): continue
LOGS = get_LOG_DIR(root)
path = os.path.join(LOGS, name)
logg.debug("LogsDirectory %s", path)
self.make_service_directory(path, modeLogsDirectory)
self.chown_service_directory(path, user, group)
envs["LOGS_DIRECTORY"] = path
for name in nameConfigurationDirectory.split(" "):
if not name.strip(): continue
CONFIG = get_CONFIG_HOME(root)
path = os.path.join(CONFIG, name)
logg.debug("ConfigurationDirectory %s", path)
self.make_service_directory(path, modeConfigurationDirectory)
# not done according the standard
# self.chown_service_directory(path, user, group)
envs["CONFIGURATION_DIRECTORY"] = path
return envs
def make_service_directory(self, path, mode):
ok = True
dirpath = os_path(self._root, path)
if not os.path.isdir(dirpath):
try:
os.makedirs(dirpath)
logg.info("created directory path: %s", dirpath)
except Exception as e: # pragma: no cover
logg.debug("errors directory path: %s\n\t%s", dirpath, e)
ok = False
filemode = int_mode(mode)
if filemode:
try:
os.chmod(dirpath, filemode)
except Exception as e: # pragma: no cover
logg.debug("errors directory path: %s\n\t%s", dirpath, e)
ok = False
else:
logg.debug("path did already exist: %s", dirpath)
if not ok:
logg.debug("could not fully create service directory %s", path)
return ok
def chown_service_directory(self, path, user, group):
# the standard defines an optimization so that if the parent
# directory does have the correct user and group then there
# is no other chown on files and subdirectories to be done.
dirpath = os_path(self._root, path)
if not os.path.isdir(dirpath):
logg.debug("chown did not find %s", dirpath)
return True
if user or group:
st = os.stat(dirpath)
st_user = pwd.getpwuid(st.st_uid).pw_name
st_group = grp.getgrgid(st.st_gid).gr_name
change = False
if user and (user.strip() != st_user and user.strip() != str(st.st_uid)):
change = True
if group and (group.strip() != st_group and group.strip() != str(st.st_gid)):
change = True
if change:
logg.debug("do chown %s", dirpath)
try:
ok = self.do_chown_tree(dirpath, user, group)
logg.info("changed %s:%s %s", user, group, ok)
return ok
except Exception as e:
logg.info("oops %s\n\t%s", dirpath, e)
else:
logg.debug("untouched %s", dirpath)
return True
def do_chown_tree(self, path, user, group):
ok = True
uid, gid = -1, -1
if user:
uid = pwd.getpwnam(user).pw_uid
gid = pwd.getpwnam(user).pw_gid
if group:
gid = grp.getgrnam(group).gr_gid
for dirpath, dirnames, filenames in os.walk(path, topdown=False):
for item in filenames:
filepath = os.path.join(dirpath, item)
try:
os.chown(filepath, uid, gid)
except Exception as e: # pragma: no cover
logg.debug("could not set %s:%s on %s\n\t%s", user, group, filepath, e)
ok = False
for item in dirnames:
dir_path = os.path.join(dirpath, item)
try:
os.chown(dir_path, uid, gid)
except Exception as e: # pragma: no cover
logg.debug("could not set %s:%s on %s\n\t%s", user, group, dir_path, e)
ok = False
try:
os.chown(path, uid, gid)
except Exception as e: # pragma: no cover
logg.debug("could not set %s:%s on %s\n\t%s", user, group, path, e)
ok = False
if not ok:
logg.debug("could not chown %s:%s service directory %s", user, group, path)
return ok
def clean_modules(self, *modules):
""" [UNIT]... -- remove the state directories
/// it recognizes --what=all or any of configuration, state, cache, logs, runtime
while an empty value (the default) removes cache and runtime directories"""
found_all = True
units = []
for module in modules:
matched = self.match_units(to_list(module))
if not matched:
logg.error("Unit %s not found.", unit_of(module))
self.error |= NOT_FOUND
found_all = False
continue
for unit in matched:
if unit not in units:
units += [ unit ]
lines = _log_lines
follow = _force
ok = self.clean_units(units)
return ok and found_all
def clean_units(self, units, what = ""):
if not what:
what = _what_kind
ok = True
for unit in units:
ok = self.clean_unit(unit, what) and ok
return ok
def clean_unit(self, unit, what = ""):
conf = self.load_unit_conf(unit)
if not conf: return False
return self.clean_unit_from(conf, what)
def clean_unit_from(self, conf, what):
if self.is_active_from(conf):
logg.warning("can not clean active unit: %s", conf.name())
return False
return self.clean_service_directories(conf, what)
def log_modules(self, *modules):
""" [UNIT]... -- start 'less' on the log files for the services
/// use '-f' to follow and '-n lines' to limit output using 'tail',
using '--no-pager' just does a full 'cat'"""
found_all = True
units = []
for module in modules:
matched = self.match_units(to_list(module))
if not matched:
logg.error("Unit %s not found.", unit_of(module))
self.error |= NOT_FOUND
found_all = False
continue
for unit in matched:
if unit not in units:
units += [ unit ]
lines = _log_lines
follow = _force
result = self.log_units(units, lines, follow)
if result:
self.error = result
return False
return found_all
def log_units(self, | |
# emtypen/emtypen.py
#!/usr/bin/env python
import clang
from clang.cindex import Index
import argparse
import os
import re
import sys
indent_spaces = 4
indentation = ' ' * indent_spaces
output = ['']
class client_data:
def __init__(self):
self.tu = None
self.current_namespaces = [] # cursors
self.current_struct = null_cursor
self.current_struct_prefix = ''
# function signature, forwarding call arguments, optional return
# keyword, function name, and "const"/"" for function constness
self.member_functions = [] # each element [''] * 5
self.printed_headers = False
self.filename = ''
self.include_guarded = False
self.form = ''
self.form_lines = []
self.headers = ''
self.copy_on_write = False
def get_tokens (tu, cursor):
return [x for x in tu.get_tokens(extent=cursor.extent)]
def print_tokens (tu, cursor, tokens_from_include_directive):
tokens = get_tokens(tu, cursor)
open_angle = '<'
open_angle_seen = False
for token in tokens:
spelling = token.spelling
if not open_angle_seen:
output[0] += ' '
output[0] += spelling
        if spelling == open_angle:
open_angle_seen = True
output[0] += '\n'
def struct_kind (kind):
if kind == clang.cindex.CursorKind.CLASS_DECL or \
kind == clang.cindex.CursorKind.STRUCT_DECL or \
kind == clang.cindex.CursorKind.CLASS_TEMPLATE or \
kind == clang.cindex.CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
return True
else:
return False
def indent (offset=0):
size = len(data.current_namespaces) + offset
return ' ' * (size * indent_spaces)
def print_lines (lines):
for line in lines:
if line != '':
            output[0] += indent() + indentation + line + '\n'
else:
output[0] += '\n'
def print_headers ():
if data.printed_headers:
return
output[0] += data.headers + '\n'
data.printed_headers = True
def struct_prefix (struct_cursor):
retval = ''
tokens = get_tokens(data.tu, struct_cursor)
open_brace = '{'
struct_ = 'struct'
class_ = 'class'
for i in range(len(tokens)):
spelling = tokens[i].spelling
if spelling == open_brace:
break
if spelling == struct_ or spelling == class_:
retval += '\n' + indent(-1)
elif i:
retval += ' '
retval += spelling
return retval
def member_params (cursor):
tokens = get_tokens(data.tu, cursor)
open_brace = '{'
semicolon = ';'
close_paren = ')'
const_token = 'const'
comma = ','
str = ''
constness = ''
identifier_regex = re.compile(r'[_a-zA-Z][_a-zA-Z0-9]*')
probably_args = []
close_paren_seen = False
for i in range(len(tokens)):
spelling = tokens[i].spelling
if identifier_regex.match(spelling) and i < len(tokens) - 1 and (tokens[i + 1].spelling == comma or tokens[i + 1].spelling == close_paren):
probably_args.append(spelling)
if close_paren_seen and spelling == const_token:
constness = 'const'
if spelling == close_paren:
close_paren_seen = True
if spelling == open_brace or spelling == semicolon:
break
if i:
str += ' '
str += spelling
args = [x for x in cursor.get_arguments()]
args_str = ''
function_name = cursor.spelling
for i in range(len(args)):
arg_cursor = args[i]
# Sometimes, libclang gets confused. When it does, try our best to
# figure out the parameter names anyway.
if arg_cursor.spelling == '':
args_str = ', '.join(probably_args)
os.write(2,
'''An error has occurred in determining the name of parameter {} of function
{}. This usually occurs when libclang can't figure out the type of the
parameter (often due to a typo or missing include somewhere). We're using
these possibly-wrong, heuristically-determined parameter names instead:
'{}'.\n'''.format(i, function_name, args_str))
break
if i:
args_str += ', '
args_str += arg_cursor.spelling
return_str = cursor.result_type.kind != clang.cindex.TypeKind.VOID and 'return ' or ''
return [str, args_str, return_str, function_name, constness]
def indent_lines (lines):
regex = re.compile(r'\n')
indentation = indent()
return regex.sub('\n' + indentation, indentation + lines)
def find_expansion_lines (lines):
retval = [0] * 3
for i in range(len(lines)):
line = lines[i]
try:
nonvirtual_pos = line.index('{nonvirtual_members}')
except:
nonvirtual_pos = -1
try:
pure_virtual_pos = line.index('{pure_virtual_members}')
except:
pure_virtual_pos = -1
try:
virtual_pos = line.index('{virtual_members}')
except:
virtual_pos = -1
if nonvirtual_pos != -1:
retval[0] = (i, nonvirtual_pos)
elif pure_virtual_pos != -1:
retval[1] = (i, pure_virtual_pos)
elif virtual_pos != -1:
retval[2] = (i, virtual_pos)
return retval
def close_struct ():
lines = data.form_lines
expansion_lines = find_expansion_lines(lines)
lines = map(
lambda line: line.format(
struct_prefix=data.current_struct_prefix,
struct_name=data.current_struct.spelling,
nonvirtual_members='{nonvirtual_members}',
pure_virtual_members='{pure_virtual_members}',
virtual_members='{virtual_members}'
),
lines
)
nonvirtual_members = ''
pure_virtual_members = ''
virtual_members = ''
for function in data.member_functions:
if data.copy_on_write:
nonvirtual_members += \
indentation + function[0] + '\n' + \
indentation + '{ assert(handle_); ' + function[2] + \
(function[4] == 'const' and 'read().' or 'write().') + \
function[3] + '(' + function[1] + ' ); }\n'
else:
nonvirtual_members += \
indentation + function[0] + '\n' + \
indentation + '{ assert(handle_); ' + function[2] + \
'handle_->' + function[3] + \
'(' + function[1] + ' ); }\n'
pure_virtual_members += \
indentation * 2 + 'virtual ' + function[0] + ' = 0;\n'
virtual_members += \
indentation * 2 + 'virtual ' + function[0] + '\n' + \
indentation * 2 + '{ ' + function[2] + \
'value_.' + function[3] + \
'(' + function[1] + ' ); }\n'
nonvirtual_members = nonvirtual_members[:-1]
pure_virtual_members = pure_virtual_members[:-1]
virtual_members = virtual_members[:-1]
lines[expansion_lines[0][0]] = nonvirtual_members
lines[expansion_lines[1][0]] = pure_virtual_members
lines[expansion_lines[2][0]] = virtual_members
output[0] += '\n'
for line in lines:
output[0] += indent_lines(line) + '\n'
def open_namespace (namespace_):
output[0] += '\n' + indent() + 'namespace ' + namespace_.spelling + ' {'
def close_namespace ():
output[0] += '\n' + indent() + '}\n'
class child_visit:
Break = 0
Continue = 1
Recurse = 2
def visit_impl (cursor, parent):
# close open namespaces we have left
enclosing_namespace = parent
while enclosing_namespace != data.tu.cursor and \
enclosing_namespace.kind != clang.cindex.CursorKind.NAMESPACE:
enclosing_namespace = enclosing_namespace.semantic_parent
if enclosing_namespace != data.tu.cursor and \
enclosing_namespace.kind == clang.cindex.CursorKind.NAMESPACE:
while len(data.current_namespaces) and \
enclosing_namespace != data.current_namespaces[-1]:
data.current_namespaces.pop()
close_namespace()
# close open struct if we have left it
enclosing_struct = parent
while enclosing_struct and \
enclosing_struct != data.tu.cursor and \
not struct_kind(enclosing_struct.kind):
enclosing_struct = enclosing_struct.semantic_parent
if enclosing_struct and \
data.current_struct != null_cursor and \
enclosing_struct != data.current_struct:
close_struct()
data.current_struct = null_cursor
data.member_functions = []
location = cursor.location
from_main_file_ = from_main_file(location)
kind = cursor.kind
if kind == clang.cindex.CursorKind.NAMESPACE:
if from_main_file_:
print_headers()
open_namespace(cursor)
data.current_namespaces.append(cursor)
return child_visit.Recurse
elif not from_main_file_:
return child_visit.Continue
elif struct_kind(kind):
if data.current_struct == null_cursor:
print_headers()
data.current_struct = cursor
data.current_struct_prefix = struct_prefix(cursor)
return child_visit.Recurse
elif kind == clang.cindex.CursorKind.CXX_METHOD:
data.member_functions.append(member_params(cursor))
return child_visit.Continue
def visit (cursor, parent=None):
for child in cursor.get_children():
result = visit_impl(child, cursor)
if result == child_visit.Recurse:
if visit(child, cursor) == child_visit.Break:
return child_visit.Break
elif result == child_visit.Break:
return child_visit.Break
elif result == child_visit.Continue:
continue
manual = '''emtypen Users' Manual
emtypen generates type erasure C++ code. It does this to automate much of the
drudgery of creating such types by hand.
Some of this might not make sense if you don't know how type erasure works.
See http://tzlaine.github.io/type_erasure if this is the case.
At the highest level of abstraction, emtypen takes three input files
containing code and generates a single output source file. It uses libclang,
a wrapper around the Clang front end, to do this.
The three input files are the "archetype" file, the "form" file, and the
"header" file. The archetype must always be specified. There are implicit
defaults for the form and header.
The Archetype File
The archetype file contains one or more structs, struct templates, classes
and/or class templates (hereafter generically referred to just as
"archetypes"). Archetypes that are templates produce generated types
("erased types" hereafter) that are also templates.
Each archetype defines the public API that the erased type requires of all the
types that it can hold. The erased type will also contain all the
contructors, assignment operators and other operators defined in the form
provided. It is an error to define any of these fundamental operations in the
archetype; they go in the form instead. Here is an example archetype file:
#ifndef LOGGABLE_INTERFACE_INCLUDED__
#define LOGGABLE_INTERFACE_INCLUDED__
#include <iostream>
struct loggable
{
std::ostream & log (std::ostream & os) const;
};
#endif
Note that this is a complete and valid C++ header. You can syntax check it
with your favorite compiler if you like. emtypen will preserve the include
guard, if any, include directives, if any, and the namespaces in which the
archetypes are declared, if any.
IMPORTANT: Give each function parameter a name. If the parameters in an
archetype's functions are left unnamed, the generated forwarding functions
will be malformed.
Due to libclang limitations, macros and comments are not preserved.
Declarations other than the ones listed above are not preserved (for instance,
function declarations).
The Form File
The form file contains a template-like form that gets filled in with
repetitive code generated from an archetype. The form will be repeated in the
output once for each archetype.
There are certain magic strings in the form that are replaced with generated
code. If you want to create a new | |
        if kwargs.get('async_req'):
            return self.__get_templates_with_http_info(bt_locator, **kwargs)  # noqa: E501
else:
(data) = self.__get_templates_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def get_trigger(self, bt_locator, trigger_locator, **kwargs): # noqa: E501
"""get_trigger # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_trigger(bt_locator, trigger_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str trigger_locator: (required)
:param str fields:
:return: Trigger
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_trigger_with_http_info(bt_locator, trigger_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_trigger_with_http_info(bt_locator, trigger_locator, **kwargs) # noqa: E501
return data
def get_trigger_setting(self, bt_locator, trigger_locator, field_name, **kwargs): # noqa: E501
"""get_trigger_setting # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_trigger_setting(bt_locator, trigger_locator, field_name, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str trigger_locator: (required)
:param str field_name: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_trigger_setting_with_http_info(bt_locator, trigger_locator, field_name, **kwargs) # noqa: E501
else:
(data) = self.__get_trigger_setting_with_http_info(bt_locator, trigger_locator, field_name, **kwargs) # noqa: E501
return data
def get_triggers(self, bt_locator, **kwargs): # noqa: E501
"""get_triggers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_triggers(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str fields:
:return: Triggers
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_triggers_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_triggers_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def get_vcs_labeling_options(self, bt_locator, **kwargs): # noqa: E501
"""get_vcs_labeling_options # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vcs_labeling_options(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:return: VcsLabeling
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_vcs_labeling_options_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_vcs_labeling_options_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def get_vcs_root_entries(self, bt_locator, **kwargs): # noqa: E501
"""get_vcs_root_entries # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vcs_root_entries(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str fields:
:return: VcsRootEntries
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_vcs_root_entries_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_vcs_root_entries_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def get_vcs_root_entry(self, bt_locator, vcs_root_locator, **kwargs): # noqa: E501
"""get_vcs_root_entry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vcs_root_entry(bt_locator, vcs_root_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str vcs_root_locator: (required)
:param str fields:
:return: VcsRootEntry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_vcs_root_entry_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_vcs_root_entry_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
return data
def get_vcs_root_entry_checkout_rules(self, bt_locator, vcs_root_locator, **kwargs): # noqa: E501
"""get_vcs_root_entry_checkout_rules # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vcs_root_entry_checkout_rules(bt_locator, vcs_root_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str vcs_root_locator: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_vcs_root_entry_checkout_rules_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_vcs_root_entry_checkout_rules_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
return data
def get_zipped(self, path, bt_locator, **kwargs): # noqa: E501
"""get_zipped # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_zipped(path, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str bt_locator: (required)
:param str base_path:
:param str locator:
:param str name:
:param bool resolve_parameters:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_zipped_with_http_info(path, bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__get_zipped_with_http_info(path, bt_locator, **kwargs) # noqa: E501
return data
def remove_all_templates(self, bt_locator, **kwargs): # noqa: E501
"""remove_all_templates # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_all_templates(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param bool inline_settings:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__remove_all_templates_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__remove_all_templates_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def remove_template(self, bt_locator, template_locator, **kwargs): # noqa: E501
"""remove_template # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_template(bt_locator, template_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str template_locator: (required)
:param bool inline_settings:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__remove_template_with_http_info(bt_locator, template_locator, **kwargs) # noqa: E501
else:
(data) = self.__remove_template_with_http_info(bt_locator, template_locator, **kwargs) # noqa: E501
return data
def replace_agent_requirement(self, bt_locator, agent_requirement_locator, **kwargs): # noqa: E501
"""replace_agent_requirement # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_agent_requirement(bt_locator, agent_requirement_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str agent_requirement_locator: (required)
:param str fields:
:param AgentRequirement body:
:return: AgentRequirement
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__replace_agent_requirement_with_http_info(bt_locator, agent_requirement_locator, **kwargs) # noqa: E501
else:
(data) = self.__replace_agent_requirement_with_http_info(bt_locator, agent_requirement_locator, **kwargs) # noqa: E501
return data
def replace_agent_requirements(self, bt_locator, **kwargs): # noqa: E501
"""replace_agent_requirements # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_agent_requirements(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str fields:
:param AgentRequirements body:
:return: AgentRequirements
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__replace_agent_requirements_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__replace_agent_requirements_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def replace_artifact_dep(self, bt_locator, artifact_dep_locator, **kwargs): # noqa: E501
"""replace_artifact_dep # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_artifact_dep(bt_locator, artifact_dep_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str artifact_dep_locator: (required)
:param str fields:
:param ArtifactDependency body:
:return: ArtifactDependency
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__replace_artifact_dep_with_http_info(bt_locator, artifact_dep_locator, **kwargs) # noqa: E501
else:
(data) = self.__replace_artifact_dep_with_http_info(bt_locator, artifact_dep_locator, **kwargs) # noqa: E501
return data
def replace_artifact_deps(self, bt_locator, **kwargs): # noqa: E501
"""replace_artifact_deps # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_artifact_deps(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str fields:
:param ArtifactDependencies body:
:return: ArtifactDependencies
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__replace_artifact_deps_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__replace_artifact_deps_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def replace_feature(self, bt_locator, feature_id, **kwargs): # noqa: E501
"""replace_feature # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_feature(bt_locator, feature_id, async_req=True)
import copy
import math
from collections import defaultdict
from typing import List, Set
from propositional_logic.syntax import Formula as PropositionalFormula, is_variable
from propositional_logic.semantics import Model
UNSAT = "UNSAT"
SAT = "SAT"
SAT_UNKNOWN = "SAT_UNKNOWN"
class CNFClause:
def __init__(self, positive_literals: Set[str] = None, negative_literals: Set[str] = None):
self.positive_literals = positive_literals if positive_literals is not None else set()
self.negative_literals = negative_literals if negative_literals is not None else set()
self.all_literals = dict.fromkeys(self.positive_literals, True)
self.all_literals.update(dict.fromkeys(self.negative_literals, False))
for pos_var in self.positive_literals:
assert is_variable(pos_var)
for neg_var in self.negative_literals:
assert is_variable(neg_var)
self.is_sat = UNSAT if len(self) == 0 else SAT_UNKNOWN
self.inferred_assignment = None
self.watched_literals = set()
self.update_watched_literals_and_maybe_propagate(dict())
def __repr__(self) -> str:
if len(self) == 0:
return ""
my_repr = "(" * (len(self) - 1)
first_pos = 0
first_neg = 0
pos_literals_list = list(self.positive_literals)
neg_literals_list = list(self.negative_literals)
if len(pos_literals_list) > 0:
my_repr += str(pos_literals_list[0])
first_pos = 1
elif len(neg_literals_list) > 0:
my_repr += "~" + str(neg_literals_list[0])
first_neg = 1
for pos_index in range(first_pos, len(pos_literals_list)):
my_repr += "|" + str(pos_literals_list[pos_index]) + ")"
for neg_index in range(first_neg, len(neg_literals_list)):
my_repr += "|" + "~" + str(neg_literals_list[neg_index]) + ")"
return my_repr
def __eq__(self, other: object) -> bool:
return isinstance(other, CNFClause) \
and self.positive_literals == other.positive_literals \
and self.negative_literals == other.negative_literals
def __ne__(self, other: object) -> bool:
return not self == other
def __hash__(self) -> int:
return hash(str(self))
def __len__(self):
return len(self.all_literals)
def to_PropositionalFormula(self) -> PropositionalFormula:
return PropositionalFormula.parse(str(self))
def is_contain_negation_of_literal(self, variable: str, assignment: bool) -> bool:
return self.all_literals.get(variable, assignment) != assignment
def get_all_variables(self) -> Set[str]:
return set(self.all_literals.keys())
def get_all_literals(self) -> Set[str]:
return {pos for pos in self.positive_literals} | {'~' + neg for neg in self.negative_literals}
def on_backjump(self, model: Model):
self.update_with_new_model(model)
self.update_watched_literals_and_maybe_propagate(model)
return self.inferred_assignment if self.inferred_assignment is not None else self.is_sat
def update_with_new_model(self, model: Model):
for pos in self.positive_literals: # Assuming we have small clauses, but big models
if model.get(pos, False):
self.watched_literals = set()
self.inferred_assignment = None
self.is_sat = SAT
return
for neg in self.negative_literals:
if not model.get(neg, True):
self.watched_literals = set()
self.inferred_assignment = None
self.is_sat = SAT
return
# No literal was satisfied, so SAT_UNKNOWN - unless all of our variables are already in the model, in which case there's no chance to become SAT
if self.get_all_variables().issubset(model.keys()):
self.is_sat = UNSAT
else:
self.is_sat = SAT_UNKNOWN
def sat_value_under_assignment(self, variable: str, assignment: bool):
if self.is_sat in (SAT, UNSAT) or variable not in self.all_literals:
return self.inferred_assignment if self.inferred_assignment is not None else self.is_sat
elif self.inferred_assignment is not None: # We have only one shot to get SAT
return SAT if self.inferred_assignment == (variable, assignment) else UNSAT
elif self.all_literals.get(variable, not assignment) == assignment:
return SAT
return SAT_UNKNOWN
def is_satisfied_under_assignment(self, variable: str, assignment: bool) -> bool:
return self.all_literals.get(variable, not assignment) == assignment
def update_with_new_assignment(self, variable: str, assignment: bool, model: Model):
if self.is_sat in (SAT, UNSAT):
return self.is_sat # No new assignment will change this state, so spare the check
if self.all_literals.get(variable, not assignment) == assignment:
self.is_sat = SAT
self.inferred_assignment = None
self.watched_literals = set()
return SAT
# NOTE: If we're here, the assigned variable is either not in our clause, OR the assignment is not satisfying us
if self.inferred_assignment is not None and self.inferred_assignment[0] == variable:
self.is_sat = UNSAT # When we have an inferred variable, the only chance we'll be SAT is if it's assigned correctly
self.inferred_assignment = None
self.watched_literals = set()
return UNSAT
if variable in self.watched_literals: # We got an un-satisfying assignment to one of our watched literals
self.update_watched_literals_and_maybe_propagate(model)
assert self.is_sat == SAT_UNKNOWN # If we got here, we MUST be SAT_UNKNOWN
return self.inferred_assignment if self.inferred_assignment is not None else self.is_sat
def update_watched_literals_and_maybe_propagate(self, model: Model):
self.watched_literals = set() # Finding 1 watched literal is as difficult as finding 2, so don't keep the old watched_literals
self.inferred_assignment = None
if self.is_sat in (SAT, UNSAT):
return
candidates = self.get_all_variables() - model.keys()
num_to_take = min(2, len(candidates))
if num_to_take >= 1: # Update watched_literals
the_chosen_ones = list(candidates)[:num_to_take]
self.watched_literals = set(the_chosen_ones)
if num_to_take == 1: # Also update inferred_assignment (i.e. propagate)
inferred_variable = the_chosen_ones[0]
self.inferred_assignment = inferred_variable, self.all_literals[inferred_variable]
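# Illustrative example added for clarity (not part of the original module); a
# minimal sketch of the watched-literals mechanism above: once every literal
# but one is falsified, the clause reports the remaining one as an inferred
# (propagated) assignment. Defined as an unused helper so importing the module
# stays side-effect free.
def _example_clause_unit_propagation():
    clause = CNFClause(positive_literals={'p', 'q'})
    # Falsifying 'p' leaves 'q' as the only unassigned (watched) literal...
    result = clause.update_with_new_assignment('p', False, {'p': False})
    # ...so the clause propagates q=True.
    assert result == ('q', True)
    assert clause.inferred_assignment == ('q', True)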
class CNFFormula:
def __init__(self, clauses: List[CNFClause]):
self.clauses = clauses
self.variable_to_containing_clause = dict()
self.last_result = SAT_UNKNOWN
for clause in self.clauses:
for var in clause.get_all_variables():
current_clauses = self.variable_to_containing_clause.get(var, set())
current_clauses.add(clause)
self.variable_to_containing_clause[var] = current_clauses
def __repr__(self) -> str:
if len(self.clauses) == 0:
return ""
my_repr = "(" * (len(self.clauses) - 1)
my_repr += str(self.clauses[0])
for clause_index in range(1, len(self.clauses)):
my_repr += "&" + str(self.clauses[clause_index]) + ")"
return my_repr
def __eq__(self, other: object) -> bool:
return isinstance(other, CNFFormula) and self.clauses == other.clauses
def __ne__(self, other: object) -> bool:
return not self == other
def __hash__(self) -> int:
return hash(str(self))
def __len__(self):
return len(self.clauses)
def to_PropositionalFormula(self) -> PropositionalFormula:
return PropositionalFormula.parse(str(self))
def get_all_variables(self) -> Set[str]:
return set(self.variable_to_containing_clause.keys())
def count_clauses_satisfied_by_assignment(self, variable: str, assignment: bool):
assert is_variable(variable)
sat_counter = 0
for clause in self.variable_to_containing_clause[variable]:
if clause.is_satisfied_under_assignment(variable, assignment):
sat_counter += 1
return sat_counter
def add_clause(self, new_clause: CNFClause):
self.clauses.append(new_clause)
for var in new_clause.get_all_variables():
current_clauses = self.variable_to_containing_clause.get(var, set())
current_clauses.add(new_clause)
self.variable_to_containing_clause[var] = current_clauses
def on_backjump(self, model: Model):
sat_counter = 0
found_unsat = None
inferred_assignment = SAT_UNKNOWN # If we got one inferred assignment, we'll return it. Otherwise, we'll return SAT_UNKNOWN
for clause in self.clauses:
result = clause.on_backjump(model)
if result == UNSAT:
found_unsat = clause # Just a precaution: if this happens the entire formula is UNSAT, and we'll catch that elsewhere
elif result == SAT:
sat_counter += 1
elif result == SAT_UNKNOWN:
continue
else: # Just a precaution, as backjumping preserves propagated assignments
inferred_assignment = result + (clause,)
if found_unsat is not None:
self.last_result = UNSAT, found_unsat
elif sat_counter == len(self.clauses):
self.last_result = SAT
else:
self.last_result = inferred_assignment
def update_with_new_assignment(self, variable: str, assignment: bool, model: Model):
assert is_variable(variable)
are_all_sat = True
found_unsat = None
inferred_assignment = SAT_UNKNOWN # If we got one inferred assignment, we'll return it. Otherwise, we'll return SAT_UNKNOWN
for clause in self.variable_to_containing_clause[variable]:
result = clause.update_with_new_assignment(variable, assignment, model)
if result == UNSAT:
found_unsat = clause # Maybe can return here, but won't make big difference
are_all_sat = False
elif result == SAT:
continue
elif result == SAT_UNKNOWN:
are_all_sat = False
else: # Result is an inferred assignment. Continue looping to make sure we're not UNSAT. Note this means inferred_assignment might change
inferred_assignment = result + (clause,)
are_all_sat = False
if found_unsat is not None:
self.last_result = UNSAT, found_unsat
elif are_all_sat: # Only if all clauses containing the last assigned var are SAT do we bother checking that the rest are SAT too; if not, put SAT_UNKNOWN
for clause in self.clauses:
if clause.is_sat != SAT:
are_all_sat = False
self.last_result = SAT if are_all_sat else SAT_UNKNOWN
else:
self.last_result = inferred_assignment
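# Illustrative example added for clarity (not part of the original module); a
# minimal sketch of formula-level propagation: on (p|q)&(~p|q), assigning
# p=True satisfies the first clause and forces q=True in the second, which
# CNFFormula reports through last_result. Defined as an unused helper so
# importing the module stays side-effect free.
def _example_formula_propagation():
    c1 = CNFClause(positive_literals={'p', 'q'})
    c2 = CNFClause(positive_literals={'q'}, negative_literals={'p'})
    formula = CNFFormula([c1, c2])
    formula.update_with_new_assignment('p', True, {'p': True})
    # The inferred assignment is reported together with its causing clause.
    assert formula.last_result == ('q', True, c2)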
class ImplicationGraph:
def __init__(self, decided_variables: Model = None):
decided_variables = dict(decided_variables) if decided_variables is not None else dict()
self.curr_decision_level = 0
self.conflict_clause = None
self.decision_variables = [decided_variables]
self.inferred_variables = [dict()]
self.total_model = dict()
self.total_model.update(decided_variables)
# Map each inferred variable to the clause that caused it, and at which level that was
self.causing_clauses = {variable: (None, self.curr_decision_level) for variable in decided_variables.keys()}
def __repr__(self) -> str:
my_repr = ""
for i in range(self.curr_decision_level):
my_repr += "LEVEL " + str(i) + ": " + "\n" \
+ "Decided: " + str(self.decision_variables[i]) + "\n" \
+ "Inferred: " + str(self.inferred_variables[i]) + "\n"
return my_repr
def __eq__(self, other: object) -> bool:
return isinstance(other, ImplicationGraph) \
and self.decision_variables == other.decision_variables \
and self.inferred_variables == other.inferred_variables \
and self.curr_decision_level == other.curr_decision_level \
and self.causing_clauses == other.causing_clauses
def __ne__(self, other: object) -> bool:
return not self == other
def __hash__(self) -> int:
return hash(str(self))
def __len__(self):
return self.curr_decision_level
def add_decision(self, variable, assignment):
assert is_variable(variable)
assert variable not in self.total_model.keys()
self.curr_decision_level += 1
self.decision_variables.append({variable: assignment})
self.inferred_variables.append(dict())
self.total_model[variable] = assignment
self.causing_clauses[variable] = (None, self.curr_decision_level)
def add_inference(self, variable: str, assignment: bool, causing_clause: CNFClause):
assert is_variable(variable)
assert variable not in self.total_model.keys()
self.inferred_variables[-1].update({variable: assignment})
self.total_model[variable] = assignment
self.causing_clauses[variable] = (causing_clause, self.curr_decision_level)
def get_causing_clause_of_variable(self, variable: str) -> CNFClause:
assert is_variable(variable)
return self.causing_clauses[variable][0]
def get_decision_level_of_variable(self, variable: str) -> int:
assert is_variable(variable)
return self.causing_clauses[variable][1]
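# Illustrative sketch of how decision levels are recorded (kept as a comment
# because it sits inside the class body; 'unit_clause' stands for any
# CNFClause and is hypothetical):
#
#     graph = ImplicationGraph()
#     graph.add_decision('p', True)                  # opens decision level 1
#     graph.add_inference('q', True, unit_clause)    # 'q' inferred at level 1
#     graph.get_decision_level_of_variable('q')      # -> 1
#     graph.get_causing_clause_of_variable('p')      # -> None (a decision, not an inference)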
def get_causing_variables(self, variable: str) -> Set[str]:
assert is_variable(variable)
causing_clause = self.get_causing_clause_of_variable(variable)
return causing_clause.get_all_variables() if causing_clause is not None else set()
# with Gaze ------------------
import bpy
import numpy as np
from itertools import repeat
import random
import os
import sys
from sympy.geometry import Point
from numpy import cos, sin
from math import radians
import mathutils
from bpy import context
"""
Render the images, Depth and capture the metadata
Scenario 1 - Only rotate the head with respect to the head bone
Stored in "HeadRot" folder
Scenario 2 - Translate the camera without any head or camera rotation
Stored in "CameraTran" folder
Scenario 3 - Apply Head Rotation and Rotate the camera Centering the Head
with a Yaw within +-45 Degree and Pitch +-30 Degree
Stored in "HeadCameraRotTran" folder
"""
def point_at(obj, target, roll=0):
"""
Rotate obj to look at target
:arg obj: the object to be rotated. Usually the camera
:arg target: the location (3-tuple or Vector) to be looked at
:arg roll: The angle of rotation about the axis from obj to target in radians.
Based on: https://blender.stackexchange.com/a/5220/12947 (ideasman42)
"""
if not isinstance(target, mathutils.Vector):
target = mathutils.Vector(target)
loc = obj.location
# direction points from the object to the target
direction = target - loc
quat = direction.to_track_quat('-Z', 'Y')
# /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py
quat = quat.to_matrix().to_4x4()
rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')
# remember the current location, since assigning to obj.matrix_world changes it
loc = loc.to_tuple()
obj.matrix_world = quat @ rollMatrix
obj.location = loc
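# Illustrative usage sketch (not part of the original script; the target
# coordinates are hypothetical and assume a scene containing a 'Camera'
# object): aim the camera at an approximate head position.
#
#     cam = bpy.data.objects['Camera']
#     point_at(cam, (0.0, 0.0, 1.6), roll=0.0)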
def map_tuple_gen(func, tup):
"""
Applies func to each element of tup and returns a new tuple.
>>> a = (1, 2, 3, 4)
>>> func = lambda x: x * x
>>> map_tuple_gen(func, a)
(1, 4, 9, 16)
Based on : https://codereview.stackexchange.com/questions/86753/map-a-function-to-all-elements-of-a-tuple/86756
"""
return tuple(func(itup) for itup in tup)
def generatePoints(r, center, num):
"""
Generate the cartesian co-ordinates of random points on a surface of sphere
Constraints: phi - within (60, 120) degrees
theta - within (-45, 45) degrees
:arg r: radius of the sphere
:arg center: center of the sphere
:arg num: number of random locations to be generated
Based on: https://stackoverflow.com/questions/33976911/generate-a-random-sample-of-points-distributed-on-the-surface-of-a-unit-sphere
"""
pts = []
for i in range(0, num):
phi = radians(random.uniform(60, 120))
theta = radians(random.uniform(-45, 45))
x, y, z = (center.x - sin(theta) * sin(phi) * r), (center.y - (cos(theta) * sin(phi) * r)), (
center.z + cos(phi) * r)
pts.append((x, y, z))
return pts
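# Illustrative usage sketch (hypothetical radius and center; not part of the
# original script): sample 5 candidate camera positions on a sphere around a
# head-height center.
#
#     head_center = Point(0.0, 0.0, 1.5)
#     camera_positions = generatePoints(0.8, head_center, 5)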
def get_random_color():
''' generate rgb using a list comprehension '''
r, g, b = [random.uniform(0.4, 0.8) for i in range(3)]
return r, g, b, 1
def getHP():
matrix_empty = bpy.data.objects['empty'].matrix_world
r_empty = tuple(map(round, np.degrees(np.array(matrix_empty.to_euler('XYZ')[0:3])), repeat(4)))
# l_empty = tuple(map(round, matrix_empty.translation, repeat(4)))
return (-r_empty[2], -r_empty[0], r_empty[1])
def getDeltaHP(yaw, pitch, roll):
yawU, pitchU, rollU = getHP()
deltaYaw = yaw - yawU
deltaPitch = pitch - pitchU
deltaRoll = roll - rollU
return deltaYaw, deltaPitch, deltaRoll
def applyHP(yaw, pitch, roll):
# print('\n', round(yaw, 4), round(pitch, 4), round(roll, 4))
arma = bpy.data.objects['Armature']
bpy.context.view_layer.objects.active = arma
bpy.ops.object.mode_set(mode='POSE')
boneHead = arma.pose.bones['G6Beta_Head'] # Head G6Beta_Head
boneNeck = arma.pose.bones['G6Beta_Neck'] # NeckTwist01 G6Beta_Neck
boneHead.rotation_mode = 'XYZ'
boneNeck.rotation_mode = 'XYZ'
boneNeck.rotation_euler = (0, 0, 0)
bpy.context.view_layer.update()
bpy.context.scene.transform_orientation_slots[0].type = 'LOCAL'
if (yaw > 0) & (pitch < 0) & (roll < 0):
# print(1)
boneNeck.rotation_euler.rotate_axis('Y', -radians(yaw)) # yaw
bpy.context.view_layer.update()
boneNeck.rotation_euler.rotate_axis('Z', -radians(roll)) # roll
bpy.context.view_layer.update()
boneNeck.rotation_euler.rotate_axis('X', -radians(pitch)) # pitch
bpy.context.view_layer.update()
elif (yaw < 0) & (pitch > 0) & (roll < 0):
# print(2)
boneNeck.rotation_euler.rotate_axis('Y', -radians(yaw)) # yaw
bpy.context.view_layer.update()
boneNeck.rotation_euler.rotate_axis('Z', -radians(roll)) # roll
bpy.context.view_layer.update()
boneNeck.rotation_euler.rotate_axis('X', -radians(pitch)) # pitch
bpy.context.view_layer.update()
else:
# print(3)
boneNeck.rotation_euler.rotate_axis('X', -radians(pitch)) # pitch
bpy.context.view_layer.update()
boneNeck.rotation_euler.rotate_axis('Z', -radians(roll)) # roll
bpy.context.view_layer.update()
boneNeck.rotation_euler.rotate_axis('Y', -radians(yaw)) # yaw
bpy.context.view_layer.update()
if (yaw > 0) & (pitch < 0) & (roll < 0):
_, _, deltaRoll = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('Z', -radians(deltaRoll)) # roll
bpy.context.view_layer.update()
_, deltaPitch, _ = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('X', -radians(deltaPitch)) # pitch
bpy.context.view_layer.update()
deltaYaw, _, _ = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('Y', -radians(deltaYaw)) # yaw
bpy.context.view_layer.update()
elif (yaw < 0) & (pitch > 0) & (roll < 0):
deltaYaw, _, _ = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('Y', -radians(deltaYaw)) # yaw
bpy.context.view_layer.update()
_, deltaPitch, _ = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('X', -radians(deltaPitch)) # pitch
bpy.context.view_layer.update()
_, _, deltaRoll = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('Z', -radians(deltaRoll)) # roll
bpy.context.view_layer.update()
else:
_, deltaPitch, _ = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('X', -radians(deltaPitch)) # pitch
bpy.context.view_layer.update()
_, _, deltaRoll = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('Z', -radians(deltaRoll)) # roll
bpy.context.view_layer.update()
deltaYaw, _, _ = getDeltaHP(yaw, pitch, roll)
boneNeck.rotation_euler.rotate_axis('Y', -radians(deltaYaw)) # yaw
bpy.context.view_layer.update()
bpy.context.scene.transform_orientation_slots[0].type = 'GLOBAL'
return getHP()
def applyHPtoNeckBone(t1, t2, t3):
bpy.context.object.pose.bones["NeckTwist01"].rotation_euler = tuple(map(radians, (t1, t2, t3))) # G6Beta_Neck
bpy.context.view_layer.update()
scene = context.scene
scene.cycles.device = 'GPU'
prefs = bpy.context.preferences
cprefs = prefs.addons['cycles'].preferences
# Attempt to set GPU device types if available
for compute_device_type in ('CUDA', 'OPENCL', 'NONE'):
try:
cprefs.compute_device_type = compute_device_type
break
except TypeError:
pass
# Enable all CPU and GPU devices
for device in cprefs.devices:
device.use = True
bpy.context.scene.unit_settings.length_unit = 'METERS'
basePath = '/mnt/fastssd/Shubhajit_Stuff/dataCreation/Data/'
baseTexturePath = r"/mnt/fastssd/Shubhajit_Stuff/Code/Background/Colored Brodatz/"
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
bpy.context.scene.cycles.progressive = 'PATH'
bpy.context.scene.cycles.samples = 256 # sbasak01
bpy.context.scene.frame_set(0)
bpy.context.view_layer.update()
camera = bpy.data.objects['Camera']
scene.camera = camera
empty = bpy.data.objects['empty']
empty_init_loc = tuple(map(round, empty.matrix_world.translation, repeat(4)))
camera_init_loc = tuple(camera.location)
camera_init_rotation = tuple(camera.rotation_euler)
material = bpy.data.materials['MaterialPlane001']
material.use_nodes = True
_nodes = material.node_tree.nodes
_material_links = material.node_tree.links
node_principledBsdf = _nodes['Principled BSDF']
node_mat_texture = _nodes.new('ShaderNodeTexImage')
node_mat_texture.location = -50, 90
_material_links.new(node_mat_texture.outputs['Color'], node_principledBsdf.inputs['Base Color'])
light = bpy.data.objects["Light"]
argv = sys.argv
argv = argv[argv.index("--") + 1:]
print(argv)
# biwiId = int(argv[0])
# set real background render
# argv[0] is expected to be the string 'True' or 'False'; anything else is treated as False
renderReal = (argv[0] == 'True')
# with open(f'/mnt/fastssd/Shubhajit_Stuff/Code/HeadPose/BIWIPoseLabel/{biwiId:02d}.txt', 'r') as f1:
# data = f1.readlines()
if renderReal:
bpy.data.objects['Plane.005'].hide_render = True
neckBoneDataPath = os.path.join('/mnt/fastssd/Shubhajit_Stuff/Code/HeadPose/neckBoneDataShort',
bpy.data.filepath.split('/')[-1].replace('.blend', '_neckbone.txt'))
with open(neckBoneDataPath, 'r') as f1:
data = f1.readlines()
# --------- Region Head Rotation ------------- #
if False:
dataPath = basePath + '/'.join(bpy.data.filepath.split('/')[-5:-1]) + '/HeadRot'
dataPath = dataPath.replace('Simple', 'HeadPose/Textured')
bpy.data.scenes["Scene"].node_tree.nodes["File Output"].base_path = dataPath
for i in range(0, 3469):
bpy.context.scene.frame_set(i)
textureFileName = "D" + str(random.randint(1, 112)) + "_COLORED.tif"
texture_path = os.path.join(baseTexturePath, textureFileName)
node_mat_texture.image = bpy.data.images.load(texture_path)
bpy.data.materials["MaterialLight"].node_tree.nodes["Emission"].inputs[1].default_value = \
random.uniform(20.0, 25.0) # light intensity
light.location.x = random.uniform(-2.0, 2.0)
light.location.z = random.uniform(0.8, 2.8)
camera.location = camera_init_loc
bpy.context.view_layer.update()
# backGroundFilePath = os.path.join(r'/mnt/fastssd/Shubhajit_Stuff/Code/Background/RealBackGround/', f'rgb_{random.randint(1, 40):02d}.jpg')
# bpy.data.scenes["Scene"].node_tree.nodes["Image"].image.filepath = backGroundFilePath
bpy.data.scenes["Scene"].node_tree.nodes["File Output"].file_slots[0].path = f'rgb'
# bpy.data.scenes["Scene"].render.filepath = dataPath + f'\\depth_{i:04d}.png'
textFilePath = dataPath + f'/data_{i:04d}.txt'
matrix_empty = bpy.data.objects['empty'].matrix_world
r_empty = tuple(map(round, np.degrees(np.array(matrix_empty.to_euler('XYZ')[0:3])), repeat(4)))
l_empty = tuple(map(round, matrix_empty.translation, repeat(4)))
if r_empty[0] > 25:
camera.location.y = camera.location.y - 0.15
if r_empty[1] > 25 and r_empty[2] > 25:
camera.location.y = camera.location.y - 0.15
matrix_camera = bpy.data.objects['Camera'].matrix_world
r_camera = tuple(map(round, np.degrees(np.array(matrix_camera.to_euler('XYZ')[0:3])), repeat(4)))
bpy.ops.render.render(write_still=True)
# print('Yaw %.2f Pitch %.2f Roll %.2f' % (r_empty[2], r_empty[0], r_empty[1]))
# print('Yaw %.2f Pitch %.2f Roll %.2f' % (r_camera[2], r_camera[0], r_camera[1]))
f = open(textFilePath, "w+")
f.write('Camera Location: ' + str(tuple(map(round, matrix_camera.translation, repeat(4)))) + '\n')
f.write('Head Point Location: ' + str(empty_init_loc) + '\n')
f.write('Head Point Current Location: ' + str(l_empty) + '\n')
f.write('Camera Rotation: ' + 'Yaw %.2f Pitch %.2f Roll %.2f' % (r_camera[2], r_camera[0], r_camera[1]) + '\n')
f.write('Head Rotation: ' + 'Yaw %.2f Pitch %.2f Roll %.2f' % (r_empty[2], r_empty[0], r_empty[1]) + '\n')
f.close()
if True:
bpy.data.objects["RL_G6_Hair"].hide_render = True
dataPath = basePath + '/'.join(bpy.data.filepath.split('/')[-5:-1])
dataPath = dataPath.replace('Simple', 'HeadPose/Textured')
bpy.data.scenes["Scene"].node_tree.nodes["File Output"].base_path = dataPath
i = 0
print(len(data))
for k in range(1):
# for idx, hp in enumerate(data):
for idx in range(10994, len(data)): # len(data)
hp = data[idx]
print(hp)
textureFileName = "D" + str(random.randint(1, 112)) + "_COLORED.tif"
texture_path = os.path.join(baseTexturePath, textureFileName)
node_mat_texture.image = bpy.data.images.load(texture_path)
bpy.data.materials["MaterialLight"].node_tree.nodes["Emission"].inputs[1].default_value = \
random.uniform(20.0, 25.0) # light intensity
light.location.x = random.uniform(-2.0, 2.0)
light.location.z = random.uniform(0.8, 2.8)
camera.location = camera_init_loc
# # print('From here : ', tuple(map(float, hp.strip().split(','))))
# y, p, r = tuple(map(float, hp.strip().split(',')))
#
# if k > 0:
# y, p, r = y+random.uniform(-2,2), p+random.uniform(-2,2), r+random.uniform(-2, 2)
# y, p, r = hp[0], hp[1], hp[2]
#
# applyHP(y, p, r)
flag, t1, t2, t3 = tuple(map(float, hp.strip().split(' ')))
applyHPtoNeckBone(t1, t2, t3)
# print(t1, t2, t3)
bpy.context.view_layer.update()
if renderReal:
backGroundFilePath = os.path.join(r'/mnt/fastssd/Shubhajit_Stuff/Code/Background/RealBackGround/', f'a{random.randint(1, 20):02d}new.jpg')
bpy.data.scenes["Scene"].node_tree.nodes["Image"].image.filepath = backGroundFilePath
bpy.data.scenes["Scene"].node_tree.nodes["File Output"].file_slots[0].path = f'rgb{idx:04d}'
textFilePath = dataPath + f'/data_{idx:04d}.txt'
matrix_empty = bpy.data.objects['empty'].matrix_world
r_empty = tuple(map(round, np.degrees(np.array(matrix_empty.to_euler('XYZ')[0:3])), repeat(4)))
l_empty = tuple(map(round, matrix_empty.translation, repeat(4)))
if int(flag) == 1:
camera.location.y = camera.location.y - 0.1
matrix_camera = bpy.data.objects['Camera'].matrix_world
r_camera = tuple(map(round, np.degrees(np.array(matrix_camera.to_euler('XYZ')[0:3])), repeat(4)))
bpy.ops.render.render(write_still=True)
f = open(textFilePath, "w+")
# f.write('Camera Location: ' + str(tuple(map(round, matrix_camera.translation, repeat(4)))) + '\n')
# f.write('Head Point Location: ' + str(empty_init_loc) + '\n')
# f.write('Head Point Current Location: ' + str(l_empty) + '\n')
# f.write('Camera Rotation: ' + 'Yaw %.2f Pitch %.2f Roll %.2f' % (r_camera[2], r_camera[0], r_camera[1]) + '\n')
one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
FUNNEL_START_DOCSTRING = r"""
The Funnel Transformer model was proposed in `Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
Language Processing <https://arxiv.org/abs/2006.03236>`__ by <NAME>, <NAME>, <NAME>, <NAME>.
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
and behavior.
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument :
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Parameters:
config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
FUNNEL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.FunnelTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"""
The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called
decoder) or any task-specific head on top.
""",
FUNNEL_START_DOCSTRING,
)
class TFFunnelBaseModel(TFFunnelPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.funnel = TFFunnelBaseLayer(config, name="funnel")
@add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="funnel-transformer/small-base",
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(self, inputs, **kwargs):
return self.funnel(inputs, **kwargs)
@add_start_docstrings(
"The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
FUNNEL_START_DOCSTRING,
)
class TFFunnelModel(TFFunnelPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.funnel = TFFunnelMainLayer(config, name="funnel")
@add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="funnel-transformer/small",
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(self, inputs, **kwargs):
return self.funnel(inputs, **kwargs)
@add_start_docstrings(
"""
Funnel model with a binary classification head on top as used during pre-training for identifying generated tokens.
""",
FUNNEL_START_DOCSTRING,
)
class TFFunnelForPreTraining(TFFunnelPreTrainedModel):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.funnel = TFFunnelMainLayer(config, name="funnel")
self.discriminator_predictions = TFFunnelDiscriminatorPredictions(config, name="discriminator_predictions")
@add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFFunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
r"""
Returns:
Examples::
>>> from transformers import FunnelTokenizer, TFFunnelForPreTraining
>>> tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small')
>>> model = TFFunnelForPreTraining.from_pretrained('funnel-transformer/small')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors= "tf")
>>> logits = model(inputs).logits
"""
return_dict = return_dict if return_dict is not None else self.funnel.return_dict
if inputs is None and "input_ids" in kwargs and isinstance(kwargs["input_ids"], (dict, BatchEncoding)):
warnings.warn(
"Using `input_ids` as a dictionary keyword argument is deprecated. Please use `inputs` instead."
)
inputs = kwargs["input_ids"]
discriminator_hidden_states = self.funnel(
inputs,
attention_mask,
token_type_ids,
inputs_embeds,
output_attentions,
output_hidden_states,
return_dict=return_dict,
training=training,
)
discriminator_sequence_output = discriminator_hidden_states[0]
logits = self.discriminator_predictions(discriminator_sequence_output)
if not return_dict:
return (logits,) + discriminator_hidden_states[1:]
return TFFunnelForPreTrainingOutput(
logits=logits,
hidden_states=discriminator_hidden_states.hidden_states,
attentions=discriminator_hidden_states.attentions,
)
@add_start_docstrings("""Funnel Model with a `language modeling` head on top. """, FUNNEL_START_DOCSTRING)
class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.funnel = TFFunnelMainLayer(config, name="funnel")
self.lm_head = TFFunnelMaskedLMHead(config, self.funnel.embeddings, name="lm_head")
@add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="funnel-transformer/small",
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
inputs=None,
attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.funnel.return_dict
if isinstance(inputs, (tuple, list)):
labels = inputs[7] if len(inputs) > 7 else labels
if len(inputs) > 7:
inputs = inputs[:7]
elif isinstance(inputs, (dict, BatchEncoding)):
labels = inputs.pop("labels", labels)
outputs = self.funnel(
inputs,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output, training=training)
loss = None if labels is None else self.compute_loss(labels, prediction_scores)
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
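# Illustrative usage sketch for the masked-LM head above (mirrors the
# pre-training example earlier in this file; kept as a comment because loading
# the checkpoint requires a network download):
#
#     from transformers import FunnelTokenizer, TFFunnelForMaskedLM
#     tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
#     model = TFFunnelForMaskedLM.from_pretrained("funnel-transformer/small")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
#     logits = model(inputs).logits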
@add_start_docstrings(
"""
Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
FUNNEL_START_DOCSTRING,
)
class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.funnel = TFFunnelBaseLayer(config, name="funnel")
self.classifier = TFFunnelClassificationHead(config, config.num_labels, name="classifier")
@add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="funnel-transformer/small-base",
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
inputs=None,
attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.funnel.return_dict
if isinstance(inputs, (tuple, list)):
labels = inputs[7] if len(inputs) > 7 else labels
if len(inputs) > 7:
inputs = inputs[:7]
elif isinstance(inputs, (dict, BatchEncoding)):
labels = inputs.pop("labels", labels)
outputs = self.funnel(
inputs,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
last_hidden_state = outputs[0]
pooled_output = last_hidden_state[:, 0]
logits = self.classifier(pooled_output, training=training)
loss = None if labels is None else self.compute_loss(labels, logits)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Funnel Model with a
so it is automatically scaled to device coordinates.
The order in which the annotations are drawn is not guaranteed so if it
is important, the z_index property should be set.
Args:
annotation: perrot.plot.Annotation or pero.Glyph
Annotation to be added.
x_axis: str or perrot.plot.Axis
X-axis tag or the axis itself.
y_axis: str or perrot.plot.Axis
Y-axis tag or the axis itself.
overrides: key:value pairs
Specific properties to be set additionally to the annotation.
Returns:
perrot.plot.Annotation
Final annotation object.
"""
# create annotation
if not isinstance(annotation, Annotation):
annotation = Annotation(glyph=annotation, **overrides)
# set new properties
elif overrides:
annotation.set_properties(overrides, True)
# add object
self.add(annotation)
# map axes
self.map(annotation, x_axis, scale='x_scale')
self.map(annotation, y_axis, scale='y_scale')
return annotation
def plot(self, series, x_axis='x_axis', y_axis='y_axis', **overrides):
"""
This method provides a convenient way to add data series to the plot.
Using the axes tags, the given series is connected to the particular axes'
scales, so it is automatically scaled to device coordinates.
The order in which the series are drawn is not guaranteed so if it is
important, the z_index property should be set.
Args:
series: perrot.plot.Series
Series data to be added.
x_axis: str or perrot.plot.Axis
X-axis tag or the axis itself.
y_axis: str or perrot.plot.Axis
Y-axis tag or the axis itself.
overrides: key:value pairs
Specific properties to be set to the series.
"""
# check type
if not isinstance(series, Series):
message = "Series must be of type perrot.plot.Series! -> %s" % type(series)
raise TypeError(message)
# set new properties
if overrides:
series.set_properties(overrides, True)
# set color
if series.color is UNDEF and self._colors:
series.color = self._colors.scale(series.tag)
# add object
self.add(series)
# map axes
self.map(series, x_axis, scale='x_scale')
self.map(series, y_axis, scale='y_scale')
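# Illustrative usage sketch (kept as a comment because it sits inside the
# class body; `my_series` stands for any perrot.plot.Series instance and is
# hypothetical):
#
#     plot.plot(my_series, x_axis='x_axis', y_axis='y_axis', z_index=10)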
def zoom(self, axis=None, minimum=None, maximum=None, propagate=True):
"""
Sets given range to specific axis.
If 'axis' is set to None, the given range will be applied to all axes.
This only makes sense if minimum and maximum are both set to None, so that
all the axes will be set to cover the full range of connected data series.
If only minimum or maximum is set to None, the missing value will be chosen
so that the full range of connected data series is covered.
Args:
axis: str or perrot.plot.Axis or None
Unique tag of the axis or the axis itself.
minimum: float or None
Minimum value to be set.
maximum: float or None
Maximum value to be set.
propagate: bool
If set to True, dependent axes will be zoomed accordingly.
"""
# get axes
if axis is None:
axes = self._axes[:]
elif isinstance(axis, Axis):
axes = [axis]
else:
axis = self._graphics[axis]
axes = [axis]
# sort axes by level
axes.sort(key=lambda a: a.level)
# set axes
for item in axes:
# set limits
lo, hi = minimum, maximum
# get limits from series
if lo is None and hi is None:
lo, hi = self.get_series_limits(item)
# check limits
if lo is None:
lo = item.scale.in_range[0]
if hi is None:
hi = item.scale.in_range[1]
# finalize axis
self.finalize_axis(item, lo, hi)
# propagate main axis change
if propagate and axis is not None:
self.finalize_zoom(axis)
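# Illustrative usage sketch (kept as a comment because it sits inside the
# class body; the tag and limits are hypothetical):
#
#     plot.zoom('x_axis', minimum=0., maximum=100.)  # fix the x-axis range
#     plot.zoom()                                     # autoscale all axes to their connected series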
def finalize_zoom(self, axes):
"""
For each given axis this method finalizes all child axes according to
individual settings (e.g. autoscale).
Args:
axes: (str,) or (perrot.plot.Axis,)
Unique tags of the axes or the axes itself.
"""
# check for single axis
if isinstance(axes, (str, Axis)):
axes = (axes,)
# get axes
axes = list(axes)
for i, axis in enumerate(axes):
if not isinstance(axis, Axis):
axes[i] = self._graphics[axis]
# ensure unique and sort
axes = list(set(axes))
axes.sort(key=lambda a: a.level)
# get all related axes
related = list(set(c for p in axes for c in self.get_child_axes(p)))
related.sort(key=lambda a: a.level)
# init already changed
changed = set([a.tag for a in axes])
# autoscale related axes
for axis in related:
# skip changed or independent
if axis.tag in changed or axis.static or not axis.autoscale:
continue
# get parent axes
parents = self.get_parent_axes(axis)
if not parents:
continue
# get crop
x_range = None
y_range = None
for parent in parents:
if parent.position in (POS_TOP, POS_BOTTOM):
x_range = parent.scale.in_range
elif parent.position in (POS_LEFT, POS_RIGHT):
y_range = parent.scale.in_range
# get axis limits
start, end = self.get_series_limits(axis, x_range, y_range, exact=False)
if start is None or end is None:
continue
# finalize axis
self.finalize_axis(axis, start, end)
def finalize_axis(self, axis, start, end):
"""
Finalizes range for given axis according to its settings. This ensures
specified limits, margins etc.
Args:
axis: str or perrot.plot.Axis
Unique tag of the axis or the axis itself.
start: float or None
Start value to be set.
end: float or None
End value to be set.
"""
# get axis
if not isinstance(axis, Axis):
axis = self._graphics[axis]
# get range
range_min, range_max = self.get_series_limits(axis.tag)
# use full range if not set
if start is None or end is None:
start = range_min
end = range_max
# check range
if start is None or end is None:
start = axis.empty_range[0]
end = axis.empty_range[1]
# check data limits
if axis.check_limits and range_min is not None and range_max is not None:
if start < range_min and end > range_max:
start = range_min
end = range_max
elif start < range_min:
shift = range_min - start
start += shift
end += shift
elif end > range_max:
shift = end - range_max
start -= shift
end -= shift
# check required values
if axis.includes:
incl_min = min(axis.includes)
incl_max = max(axis.includes)
if start > incl_min:
start = incl_min
if end < incl_max:
end = incl_max
# check range
if start == end and start == 0:
end = 1.
# check symmetry
if axis.symmetric:
maximum = max(abs(start), abs(end))
start, end = -maximum, maximum
# check range
if start == end:
start -= 0.1 * start
end += 0.1 * end
# apply to axis
axis.scale.in_range = (start, end)
def view(self, title=None, width=None, height=None, backend=None, **options):
"""
Shows current plot as interactive viewer app.
Note that this is just a convenient scripting shortcut and this method cannot
be used if the plot is already part of any UI app.
Args:
title: str or None
Viewer frame title. If set to None, current plot title is used.
width: float or None
Image width in device units. If set to None, current plot width
is used.
height: float or None
Image height in device units. If set to None, current plot
height is used.
backend: pero.BACKEND or None
Specific backend to be used. The value must be an item from the
pero.BACKEND enum.
options: str:any pairs
Additional parameters for specific backend.
"""
# get size
if width is None:
width = self.width
if height is None:
height = self.height
# init control
from .control import PlotControl
control = PlotControl(graphics=self)
# show viewer
control.show(title, width, height, backend, **options)
def _init_frames(self, canvas, source, overrides):
Val1: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6, TIntV Val7) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
Val7: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6, TIntV Val7,
TIntV Val8) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
Val7: TVec< TInt,int > const &
Val8: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6, TIntV Val7,
TIntV Val8, TIntV Val9) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
Val7: TVec< TInt,int > const &
Val8: TVec< TInt,int > const &
Val9: TVec< TInt,int > const &
"""
return _snap.TIntIntVV_GetV(*args)
GetV = staticmethod(GetV)
TIntIntVV.Load = new_instancemethod(_snap.TIntIntVV_Load,None,TIntIntVV)
TIntIntVV.Save = new_instancemethod(_snap.TIntIntVV_Save,None,TIntIntVV)
TIntIntVV.LoadXml = new_instancemethod(_snap.TIntIntVV_LoadXml,None,TIntIntVV)
TIntIntVV.SaveXml = new_instancemethod(_snap.TIntIntVV_SaveXml,None,TIntIntVV)
TIntIntVV.__add__ = new_instancemethod(_snap.TIntIntVV___add__,None,TIntIntVV)
TIntIntVV.__eq__ = new_instancemethod(_snap.TIntIntVV___eq__,None,TIntIntVV)
TIntIntVV.__lt__ = new_instancemethod(_snap.TIntIntVV___lt__,None,TIntIntVV)
TIntIntVV.GetMemUsed = new_instancemethod(_snap.TIntIntVV_GetMemUsed,None,TIntIntVV)
TIntIntVV.GetMemSize = new_instancemethod(_snap.TIntIntVV_GetMemSize,None,TIntIntVV)
TIntIntVV.GetPrimHashCd = new_instancemethod(_snap.TIntIntVV_GetPrimHashCd,None,TIntIntVV)
TIntIntVV.GetSecHashCd = new_instancemethod(_snap.TIntIntVV_GetSecHashCd,None,TIntIntVV)
TIntIntVV.Gen = new_instancemethod(_snap.TIntIntVV_Gen,None,TIntIntVV)
TIntIntVV.GenExt = new_instancemethod(_snap.TIntIntVV_GenExt,None,TIntIntVV)
TIntIntVV.IsExt = new_instancemethod(_snap.TIntIntVV_IsExt,None,TIntIntVV)
TIntIntVV.Reserve = new_instancemethod(_snap.TIntIntVV_Reserve,None,TIntIntVV)
TIntIntVV.Clr = new_instancemethod(_snap.TIntIntVV_Clr,None,TIntIntVV)
TIntIntVV.Trunc = new_instancemethod(_snap.TIntIntVV_Trunc,None,TIntIntVV)
TIntIntVV.Pack = new_instancemethod(_snap.TIntIntVV_Pack,None,TIntIntVV)
TIntIntVV.MoveFrom = new_instancemethod(_snap.TIntIntVV_MoveFrom,None,TIntIntVV)
TIntIntVV.Empty = new_instancemethod(_snap.TIntIntVV_Empty,None,TIntIntVV)
TIntIntVV.Len = new_instancemethod(_snap.TIntIntVV_Len,None,TIntIntVV)
TIntIntVV.Reserved = new_instancemethod(_snap.TIntIntVV_Reserved,None,TIntIntVV)
TIntIntVV.Last = new_instancemethod(_snap.TIntIntVV_Last,None,TIntIntVV)
TIntIntVV.LastValN = new_instancemethod(_snap.TIntIntVV_LastValN,None,TIntIntVV)
TIntIntVV.LastLast = new_instancemethod(_snap.TIntIntVV_LastLast,None,TIntIntVV)
TIntIntVV.BegI = new_instancemethod(_snap.TIntIntVV_BegI,None,TIntIntVV)
TIntIntVV.EndI = new_instancemethod(_snap.TIntIntVV_EndI,None,TIntIntVV)
TIntIntVV.GetI = new_instancemethod(_snap.TIntIntVV_GetI,None,TIntIntVV)
TIntIntVV.AddV = new_instancemethod(_snap.TIntIntVV_AddV,None,TIntIntVV)
TIntIntVV.AddSorted = new_instancemethod(_snap.TIntIntVV_AddSorted,None,TIntIntVV)
TIntIntVV.AddBackSorted = new_instancemethod(_snap.TIntIntVV_AddBackSorted,None,TIntIntVV)
TIntIntVV.AddVMerged = new_instancemethod(_snap.TIntIntVV_AddVMerged,None,TIntIntVV)
TIntIntVV.AddUnique = new_instancemethod(_snap.TIntIntVV_AddUnique,None,TIntIntVV)
TIntIntVV.GetVal = new_instancemethod(_snap.TIntIntVV_GetVal,None,TIntIntVV)
TIntIntVV.GetSubValV = new_instancemethod(_snap.TIntIntVV_GetSubValV,None,TIntIntVV)
TIntIntVV.Ins = new_instancemethod(_snap.TIntIntVV_Ins,None,TIntIntVV)
TIntIntVV.Del = new_instancemethod(_snap.TIntIntVV_Del,None,TIntIntVV)
TIntIntVV.DelLast = new_instancemethod(_snap.TIntIntVV_DelLast,None,TIntIntVV)
TIntIntVV.DelIfIn = new_instancemethod(_snap.TIntIntVV_DelIfIn,None,TIntIntVV)
TIntIntVV.DelAll = new_instancemethod(_snap.TIntIntVV_DelAll,None,TIntIntVV)
TIntIntVV.PutAll = new_instancemethod(_snap.TIntIntVV_PutAll,None,TIntIntVV)
TIntIntVV.Swap = new_instancemethod(_snap.TIntIntVV_Swap,None,TIntIntVV)
TIntIntVV.NextPerm = new_instancemethod(_snap.TIntIntVV_NextPerm,None,TIntIntVV)
TIntIntVV.PrevPerm = new_instancemethod(_snap.TIntIntVV_PrevPerm,None,TIntIntVV)
TIntIntVV.GetPivotValN = new_instancemethod(_snap.TIntIntVV_GetPivotValN,None,TIntIntVV)
TIntIntVV.BSort = new_instancemethod(_snap.TIntIntVV_BSort,None,TIntIntVV)
TIntIntVV.ISort = new_instancemethod(_snap.TIntIntVV_ISort,None,TIntIntVV)
TIntIntVV.Partition = new_instancemethod(_snap.TIntIntVV_Partition,None,TIntIntVV)
TIntIntVV.QSort = new_instancemethod(_snap.TIntIntVV_QSort,None,TIntIntVV)
TIntIntVV.Sort = new_instancemethod(_snap.TIntIntVV_Sort,None,TIntIntVV)
TIntIntVV.IsSorted = new_instancemethod(_snap.TIntIntVV_IsSorted,None,TIntIntVV)
TIntIntVV.Shuffle = new_instancemethod(_snap.TIntIntVV_Shuffle,None,TIntIntVV)
TIntIntVV.Reverse = new_instancemethod(_snap.TIntIntVV_Reverse,None,TIntIntVV)
TIntIntVV.Merge = new_instancemethod(_snap.TIntIntVV_Merge,None,TIntIntVV)
TIntIntVV.Intrs = new_instancemethod(_snap.TIntIntVV_Intrs,None,TIntIntVV)
TIntIntVV.Union = new_instancemethod(_snap.TIntIntVV_Union,None,TIntIntVV)
TIntIntVV.Diff = new_instancemethod(_snap.TIntIntVV_Diff,None,TIntIntVV)
TIntIntVV.IntrsLen = new_instancemethod(_snap.TIntIntVV_IntrsLen,None,TIntIntVV)
TIntIntVV.UnionLen = new_instancemethod(_snap.TIntIntVV_UnionLen,None,TIntIntVV)
TIntIntVV.Count = new_instancemethod(_snap.TIntIntVV_Count,None,TIntIntVV)
TIntIntVV.SearchBin = new_instancemethod(_snap.TIntIntVV_SearchBin,None,TIntIntVV)
TIntIntVV.SearchForw = new_instancemethod(_snap.TIntIntVV_SearchForw,None,TIntIntVV)
TIntIntVV.SearchBack = new_instancemethod(_snap.TIntIntVV_SearchBack,None,TIntIntVV)
TIntIntVV.SearchVForw = new_instancemethod(_snap.TIntIntVV_SearchVForw,None,TIntIntVV)
TIntIntVV.IsIn = new_instancemethod(_snap.TIntIntVV_IsIn,None,TIntIntVV)
TIntIntVV.IsInBin = new_instancemethod(_snap.TIntIntVV_IsInBin,None,TIntIntVV)
TIntIntVV.GetDat = new_instancemethod(_snap.TIntIntVV_GetDat,None,TIntIntVV)
TIntIntVV.GetAddDat = new_instancemethod(_snap.TIntIntVV_GetAddDat,None,TIntIntVV)
TIntIntVV.GetMxValN = new_instancemethod(_snap.TIntIntVV_GetMxValN,None,TIntIntVV)
TIntIntVV_swigregister = _snap.TIntIntVV_swigregister
TIntIntVV_swigregister(TIntIntVV)
def TIntIntVV_SwapI(*args):
"""
TIntIntVV_SwapI(TIntV LVal, TIntV RVal)
Parameters:
LVal: TVec< TVec< TInt,int >,int >::TIter
RVal: TVec< TVec< TInt,int >,int >::TIter
"""
return _snap.TIntIntVV_SwapI(*args)
def TIntIntVV_GetV(*args):
"""
GetV(TIntV Val1) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6, TIntV Val7) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
Val7: TVec< TInt,int > const &
GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6, TIntV Val7,
TIntV Val8) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
Val7: TVec< TInt,int > const &
Val8: TVec< TInt,int > const &
TIntIntVV_GetV(TIntV Val1, TIntV Val2, TIntV Val3, TIntV Val4, TIntV Val5, TIntV Val6, TIntV Val7,
TIntV Val8, TIntV Val9) -> TIntIntVV
Parameters:
Val1: TVec< TInt,int > const &
Val2: TVec< TInt,int > const &
Val3: TVec< TInt,int > const &
Val4: TVec< TInt,int > const &
Val5: TVec< TInt,int > const &
Val6: TVec< TInt,int > const &
Val7: TVec< TInt,int > const &
Val8: TVec< TInt,int > const &
Val9: TVec< TInt,int > const &
"""
return _snap.TIntIntVV_GetV(*args)
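# Usage sketch (assumption: TIntV is the integer-vector proxy exposed by this
# same SWIG module, as suggested by the signatures above):
#   v1 = TIntV(); v1.Add(1); v1.Add(2)
#   v2 = TIntV(); v2.Add(3)
#   vv = TIntIntVV_GetV(v1, v2)   # a vector holding two integer vectors
#   print(vv.Len())               # -> 2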
class TIntIntVH(object):
"""Proxy of C++ THash<(TInt,TVec<(TInt,int)>)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
HashPrimes = _snap.TIntIntVH_HashPrimes
def __init__(self, *args):
"""
__init__(THash<(TInt,TVec<(TInt,int)>)> self) -> TIntIntVH
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TIntIntVH Hash) -> TIntIntVH
Parameters:
Hash: THash< TInt,TVec< TInt,int > > const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, int const & ExpectVals, bool const & _AutoSizeP=False) -> TIntIntVH
Parameters:
ExpectVals: int const &
_AutoSizeP: bool const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, int const & ExpectVals) -> TIntIntVH
Parameters:
ExpectVals: int const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TSIn SIn) -> TIntIntVH
Parameters:
SIn: TSIn &
"""
_snap.TIntIntVH_swiginit(self,_snap.new_TIntIntVH(*args))
def Load(self, *args):
"""
Load(TIntIntVH self, TSIn SIn)
Parameters:
SIn: TSIn &
"""
return _snap.TIntIntVH_Load(self, *args)
def Save(self, *args):
"""
Save(TIntIntVH self, TSOut SOut)
Parameters:
SOut: TSOut &
"""
return _snap.TIntIntVH_Save(self, *args)
def LoadXml(self, *args):
"""
LoadXml(TIntIntVH self, PXmlTok const & XmlTok, TStr Nm="")
Parameters:
XmlTok: PXmlTok const &
Nm: TStr const &
LoadXml(TIntIntVH self, PXmlTok const & XmlTok)
Parameters:
XmlTok: PXmlTok const &
"""
return _snap.TIntIntVH_LoadXml(self, *args)
def SaveXml(self, *args):
"""
SaveXml(TIntIntVH self, TSOut SOut, TStr Nm)
Parameters:
SOut: TSOut &
Nm: TStr const &
"""
return _snap.TIntIntVH_SaveXml(self, *args)
def __eq__(self, *args):
"""
__eq__(TIntIntVH self, TIntIntVH Hash) -> bool
Parameters:
Hash: THash< TInt,TVec< TInt,int > > const &
"""
return _snap.TIntIntVH___eq__(self, *args)
def __lt__(self, *args):
"""
__lt__(TIntIntVH self, TIntIntVH Hash) -> bool
Parameters:
Hash: THash< TInt,TVec< TInt,int > > const &
"""
return _snap.TIntIntVH___lt__(self, *args)
def __call__(self, *args):
"""
__call__(TIntIntVH self, TInt Key) -> TIntV
Parameters:
Key: TInt const &
"""
return _snap.TIntIntVH___call__(self, *args)
def GetMemUsed(self):
"""
GetMemUsed(TIntIntVH self) -> ::TSize
Parameters:
self: THash< TInt,TVec< TInt,int > > const *
"""
return _snap.TIntIntVH_GetMemUsed(self)
def BegI(self):
"""
BegI(TIntIntVH self) -> THash< TInt,TVec< TInt,int > >::TIter
Parameters:
self: THash< TInt,TVec< TInt,int > > const *
"""
return _snap.TIntIntVH_BegI(self)
def EndI(self):
"""
EndI(TIntIntVH self) -> THash< TInt,TVec< TInt,int > >::TIter
Parameters:
self: THash< TInt,TVec< TInt,int > > const *
"""
return _snap.TIntIntVH_EndI(self)
def GetI(self, *args):
"""
GetI(TIntIntVH self, TInt Key) -> THash< TInt,TVec< TInt,int > >::TIter
Parameters:
Key: TInt const &
"""
# File: xmlschema/validators/global_maps.py (repo leo-b/xmlschema)
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
"""
This module contains functions and classes for namespaces XSD declarations/definitions.
"""
import warnings
from collections import Counter
from functools import lru_cache
from typing import Union
from ..exceptions import XMLSchemaKeyError, XMLSchemaTypeError, XMLSchemaValueError, \
XMLSchemaWarning
from ..names import XSD_NAMESPACE, XSD_REDEFINE, XSD_OVERRIDE, XSD_NOTATION, \
XSD_ANY_TYPE, XSD_SIMPLE_TYPE, XSD_COMPLEX_TYPE, XSD_GROUP, \
XSD_ATTRIBUTE, XSD_ATTRIBUTE_GROUP, XSD_ELEMENT, XSI_TYPE
from ..helpers import get_qname, local_name, get_extended_qname
from ..namespaces import NamespaceResourcesMap
from . import XMLSchemaNotBuiltError, XMLSchemaModelError, XMLSchemaModelDepthError, \
XsdValidator, XsdComponent, XsdAttribute, XsdSimpleType, XsdComplexType, \
XsdElement, XsdAttributeGroup, XsdGroup, XsdNotation, XsdIdentity, XsdAssert
from .builtins import xsd_builtin_types_factory
#
# Defines the load functions for XML Schema structures
def create_load_function(tag):
def load_xsd_globals(xsd_globals, schemas):
redefinitions = []
for schema in schemas:
target_namespace = schema.target_namespace
for elem in schema.root:
if elem.tag not in {XSD_REDEFINE, XSD_OVERRIDE}:
continue
location = elem.get('schemaLocation')
if location is None:
continue
for child in filter(lambda x: x.tag == tag and 'name' in x.attrib, elem):
qname = get_qname(target_namespace, child.attrib['name'])
redefinitions.append((qname, elem, child, schema, schema.includes[location]))
for elem in filter(lambda x: x.tag == tag and 'name' in x.attrib, schema.root):
qname = get_qname(target_namespace, elem.attrib['name'])
if qname not in xsd_globals:
xsd_globals[qname] = (elem, schema)
else:
try:
other_schema = xsd_globals[qname][1]
except (TypeError, IndexError):
pass
else:
# It's ignored or replaced in case of an override
if other_schema.override is schema:
continue
elif schema.override is other_schema:
xsd_globals[qname] = (elem, schema)
continue
msg = "global {} with name={!r} is already defined"
schema.parse_error(msg.format(local_name(tag), qname))
redefined_names = Counter(x[0] for x in redefinitions)
for qname, elem, child, schema, redefined_schema in reversed(redefinitions):
# Checks multiple redefinitions
if redefined_names[qname] > 1:
redefined_names[qname] = 1
redefined_schemas = [x[-1] for x in redefinitions if x[0] == qname]
if any(redefined_schemas.count(x) > 1 for x in redefined_schemas):
msg = "multiple redefinition for {} {!r}"
schema.parse_error(msg.format(local_name(child.tag), qname), child)
else:
redefined_schemas = {x[-1]: x[-2] for x in redefinitions if x[0] == qname}
for rs, s in redefined_schemas.items():
while True:
try:
s = redefined_schemas[s]
except KeyError:
break
if s is rs:
msg = "circular redefinition for {} {!r}"
schema.parse_error(msg.format(local_name(child.tag), qname), child)
break
if elem.tag == XSD_OVERRIDE:
# Components which match nothing in the target schema are ignored. See the
# sentence starting with "Source declarations not present in the target set"
# in https://www.w3.org/TR/xmlschema11-1/#override-schema.
if qname in xsd_globals:
xsd_globals[qname] = (child, schema)
else:
# Append to a list if it's a redefine
try:
xsd_globals[qname].append((child, schema))
except KeyError:
schema.parse_error("not a redefinition!", child)
except AttributeError:
xsd_globals[qname] = [xsd_globals[qname], (child, schema)]
return load_xsd_globals
load_xsd_simple_types = create_load_function(XSD_SIMPLE_TYPE)
load_xsd_attributes = create_load_function(XSD_ATTRIBUTE)
load_xsd_attribute_groups = create_load_function(XSD_ATTRIBUTE_GROUP)
load_xsd_complex_types = create_load_function(XSD_COMPLEX_TYPE)
load_xsd_elements = create_load_function(XSD_ELEMENT)
load_xsd_groups = create_load_function(XSD_GROUP)
load_xsd_notations = create_load_function(XSD_NOTATION)
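# Usage sketch (assumption about how the registry below drives these loaders):
#   load_xsd_elements(maps.elements, schemas)
# scans each schema root, records redefines/overrides, and fills the target
# map with {qname: (elem, schema)} entries that are built lazily on lookup.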
class XsdGlobals(XsdValidator):
"""
Mediator class for related XML schema instances. It stores the global
declarations defined in the registered schemas. Register a schema to
add its declarations to the global maps.
:param validator: the origin schema class/instance used for creating the global maps.
:param validation: the XSD validation mode to use, can be 'strict', 'lax' or 'skip'.
"""
_lookup_function_resolver = {
XSD_SIMPLE_TYPE: 'lookup_type',
XSD_COMPLEX_TYPE: 'lookup_type',
XSD_ELEMENT: 'lookup_element',
XSD_GROUP: 'lookup_group',
XSD_ATTRIBUTE: 'lookup_attribute',
XSD_ATTRIBUTE_GROUP: 'lookup_attribute_group',
XSD_NOTATION: 'lookup_notation',
}
def __init__(self, validator, validation='strict'):
super(XsdGlobals, self).__init__(validation)
if not all(hasattr(validator, a) for a in ('meta_schema', 'BUILDERS_MAP')):
raise XMLSchemaValueError(
"The argument {!r} is not an XSD schema validator".format(validator)
)
self.validator = validator
self.namespaces = NamespaceResourcesMap() # Registered schemas by namespace URI
self.missing_locations = [] # Missing or failing resource locations
self.types = {} # Global types (both complex and simple)
self.attributes = {} # Global attributes
self.attribute_groups = {} # Attribute groups
self.groups = {} # Model groups
self.notations = {} # Notations
self.elements = {} # Global elements
self.substitution_groups = {} # Substitution groups
self.identities = {} # Identity constraints (uniqueness, keys, keyref)
self.global_maps = (self.notations, self.types, self.attributes,
self.attribute_groups, self.groups, self.elements)
def __repr__(self):
return '%s(validator=%r, validation=%r)' % (
self.__class__.__name__, self.validator, self.validation
)
def copy(self, validator=None, validation=None):
"""Makes a copy of the object."""
obj = self.__class__(self.validator if validator is None else validator,
validation or self.validation)
obj.namespaces.update(self.namespaces)
obj.types.update(self.types)
obj.attributes.update(self.attributes)
obj.attribute_groups.update(self.attribute_groups)
obj.groups.update(self.groups)
obj.notations.update(self.notations)
obj.elements.update(self.elements)
obj.substitution_groups.update(self.substitution_groups)
obj.identities.update(self.identities)
return obj
__copy__ = copy
def lookup(self, tag, qname):
"""
General lookup method for XSD global components.
:param tag: the expanded QName of the XSD global declaration/definition \
(eg. '{http://www.w3.org/2001/XMLSchema}element'), that is used to select \
the global map for lookup.
:param qname: the expanded QName of the component to be looked-up.
:returns: an XSD global component.
:raises: an XMLSchemaValueError if the *tag* argument is not appropriate for a global \
component, an XMLSchemaKeyError if the *qname* argument is not found in the global map.
"""
try:
lookup_function = getattr(self, self._lookup_function_resolver[tag])
except KeyError:
msg = "wrong tag {!r} for an XSD global definition/declaration"
raise XMLSchemaValueError(msg.format(tag)) from None
else:
return lookup_function(qname)
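# Usage sketch (hypothetical names; assumes the global maps are already built):
#   xsd_element = schema.maps.lookup(XSD_ELEMENT, '{http://example.com/ns}root')
#   xsd_type = schema.maps.lookup_type('{http://www.w3.org/2001/XMLSchema}string')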
def lookup_notation(self, qname: str) -> XsdNotation:
try:
obj = self.notations[qname]
except KeyError:
raise XMLSchemaKeyError(f'xs:notation {qname!r} not found')
else:
if isinstance(obj, XsdNotation):
return obj
return self._build_global(obj, qname, self.notations)
def lookup_type(self, qname: str) -> Union[XsdSimpleType, XsdComplexType]:
try:
obj = self.types[qname]
except KeyError:
raise XMLSchemaKeyError(f'global xs:simpleType/xs:complexType {qname!r} not found')
else:
if isinstance(obj, (XsdSimpleType, XsdComplexType)):
return obj
return self._build_global(obj, qname, self.types)
def lookup_attribute(self, qname: str) -> XsdAttribute:
try:
obj = self.attributes[qname]
except KeyError:
raise XMLSchemaKeyError(f'global xs:attribute {qname!r} not found')
else:
if isinstance(obj, XsdAttribute):
return obj
return self._build_global(obj, qname, self.attributes)
def lookup_attribute_group(self, qname: str) -> XsdAttributeGroup:
try:
obj = self.attribute_groups[qname]
except KeyError:
raise XMLSchemaKeyError(f'global xs:attributeGroup {qname!r} not found')
else:
if isinstance(obj, XsdAttributeGroup):
return obj
return self._build_global(obj, qname, self.attribute_groups)
def lookup_group(self, qname: str) -> XsdGroup:
try:
obj = self.groups[qname]
except KeyError:
raise XMLSchemaKeyError(f'global xs:group {qname!r} not found')
else:
if isinstance(obj, XsdGroup):
return obj
return self._build_global(obj, qname, self.groups)
def lookup_element(self, qname: str) -> XsdElement:
try:
obj = self.elements[qname]
except KeyError:
raise XMLSchemaKeyError(f'global xs:element {qname!r} not found')
else:
if isinstance(obj, XsdElement):
return obj
return self._build_global(obj, qname, self.elements)
def _build_global(self, obj, qname, global_map):
if isinstance(obj, tuple):
# Not built XSD global component without redefinitions
try:
elem, schema = obj
except ValueError:
return obj[0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = self.validator.BUILDERS_MAP[elem.tag]
except KeyError:
raise XMLSchemaKeyError("wrong element %r for map %r." % (elem, global_map))
global_map[qname] = obj, # Encapsulate into a tuple to catch circular builds
global_map[qname] = factory_or_class(elem, schema, parent=None)
return global_map[qname]
elif isinstance(obj, list):
# Not built XSD global component with redefinitions
try:
elem, schema = obj[0]
except ValueError:
return obj[0][0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = self.validator.BUILDERS_MAP[elem.tag]
except KeyError:
raise XMLSchemaKeyError("wrong element %r for map %r." % (elem, global_map))
global_map[qname] = obj[0], # To catch circular builds
global_map[qname] = component = factory_or_class(elem, schema, parent=None)
# Apply redefinitions (changing elem involve a re-parsing of the component)
for elem, schema in obj[1:]:
if component.schema.target_namespace != schema.target_namespace:
msg = "redefined schema {!r} has a different targetNamespace"
raise XMLSchemaValueError(msg.format(schema))
component.redefine = component.copy()
component.redefine.parent = component
component.schema = schema
component.elem = elem
return global_map[qname]
else:
raise XMLSchemaTypeError(f"unexpected instance {obj} in global map")
def get_instance_type(self, type_name, base_type, namespaces):
"""
Returns the instance XSI type from global maps, validating it with the reference base type.
:param type_name: the XSI type attribute value, a QName in prefixed format.
:param base_type: the XSD from which the instance type has to be derived.
:param namespaces: a map from prefixes to namespaces.
"""
if base_type.is_complex() and XSI_TYPE in base_type.attributes:
base_type.attributes[XSI_TYPE].validate(type_name)
extended_name = get_extended_qname(type_name, namespaces)
xsi_type = self.lookup_type(extended_name)
if xsi_type.is_derived(base_type):
return xsi_type
elif base_type.is_union() and not base_type.facets:
# Can be valid only if the union doesn't have facets, see:
# https://www.w3.org/Bugs/Public/show_bug.cgi?id=4065
try:
if xsi_type in base_type.primitive_type.member_types:
return xsi_type
except AttributeError:
if xsi_type in base_type.member_types:
return xsi_type
raise XMLSchemaTypeError("%r cannot substitute %r" % (xsi_type, base_type))
@property
def built(self):
return all(schema.built for schema in self.iter_schemas())
@property
def unbuilt(self):
"""Property that returns a list with unbuilt components."""
return [c for s in self.iter_schemas() for c in s.iter_components()
if c is not s and not c.built]
@property
def validation_attempted(self):
if self.built:
return 'full'
elif any(schema.validation_attempted == 'partial' for schema in self.iter_schemas()):
return 'partial'
else:
return 'none'
#####################################################################
# A patcher for the ase.calculators.vasp.Vasp calculator class that #
# changes the behavior of the POSCAR writer to use the VASP 5 format #
#####################################################################
from ase.calculators.vasp.create_input import GenerateVaspInput
from ase.calculators.vasp.create_input import bool_keys, int_keys, float_keys
from ase.calculators.vasp import Vasp
from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.bandstructure import Spin
import os
import os.path
import shutil
from ase.io import read
from .other_vasp import gen_line_path
import numpy
# Tell vasp calculator to write the POSCAR using vasp5 style
def _new_write_input(self, atoms, directory='./', direct=True, vasp5=True):
from ase.io.vasp import write_vasp
from os.path import join
write_vasp(join(directory, 'POSCAR'),
self.atoms_sorted,
direct=direct,
symbol_count=self.symbol_count, vasp5=vasp5)
self.write_incar(atoms, directory=directory)
self.write_potcar(directory=directory)
self.write_kpoints(directory=directory)
self.write_sort_file(directory=directory)
# Hot patch for the GenerateVaspInput class
GenerateVaspInput.write_input = _new_write_input
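# Usage sketch (assumption: importing this module applies the patch before any
# VASP inputs are written):
#   from ase.build import bulk
#   si = bulk('Si')
#   si.calc = Vasp(xc='PBE', kpts=(4, 4, 4))
#   si.get_potential_energy()   # POSCAR is now written in the VASP 5 format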
def _load_vasprun(self, filename="vasprun.xml"):
self.vasprun = Vasprun(filename)
# read the bandgap from vasprun.xml
def _read_bandgap(self):
if not hasattr(self, "vasprun"):
self.load_vasprun()
# From DOS
dos = self.vasprun.complete_dos
bg_dos = dos.get_gap()
# From Band structure
bs = self.vasprun.get_band_structure()
bg_bs = bs.get_band_gap()
# Return the bandgaps calculated by DOS or band structure
return (bg_dos, bg_bs)
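# Usage sketch (assumes a finished VASP run with vasprun.xml in the working
# directory; read_bandgap is attached to the Vasp class further below):
#   calc = Vasp(restart=True)
#   bg_dos, bg_bs = calc.read_bandgap()
#   print(bg_dos)              # gap estimated from the DOS (eV)
#   print(bg_bs["energy"])     # gap from the band structure (pymatgen dict)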
def _read_extern_stress(self, form="kB", filename="OUTCAR"):
from ase.units import GPa
stress = None
for line in open(filename):
if line.find('external pressure') != -1:
stress = float(line.split()[3])
if stress is not None and form != "kB":
# convert from kBar (OUTCAR units) to ASE units (1 kBar = 0.1 GPa)
stress = stress * 0.1 * GPa
return stress
def _copy_files(self, select_names=None,
exclude_names=None,
tag="tag"):
# copy_file is supposed to be used only after the calculation!
if hasattr(self, "tag"):
tag = self.tag
default_names = ["INCAR", "OUTCAR", "WAVECAR", "CONTCAR",
"WAVEDER", "DOSCAR", "vasprun.xml"]
if exclude_names != None:
tmp = [p for p in default_names if p not in exclude_names]
default_names = tmp
elif select_names != None:
default_names = select_names
for fname in default_names:
if os.path.exists(fname):
f_new = ".".join((fname, tag))
shutil.copy(fname, f_new)
# Get the final potential from vasprun.xml
def _get_final_E(self, filename="vasprun.xml"):
v = Vasprun(filename)
fe = v.final_energy.real
return fe
oldrun = Vasp.run
def _run(self):
# Handle the incomplete BSE vasprun problem
oldrun(self)
if os.path.exists("vasprun.xml"):
with open("vasprun.xml", "rb+") as f:
f.seek(-12, 2) # to the -12
s = f.read()
s = s.decode("utf8")
if s.strip() != "</modeling>": # Not the last line
f.seek(0, 2) # To the last
f.write(b"</modeling>\n")
print("Warning! The vasprun.xml seems incomplete.")
# patch for writing KPOINTS
# taken from jasp
def _write_kpoints(self, directory="", fname=None):
"""Write out the KPOINTS file.
The KPOINTS file format is as follows:
line 1: a comment
line 2: number of kpoints
n <= 0 Automatic kpoint generation
n > 0 explicit number of kpoints
line 3: kpt format
if n > 0:
C,c,K,k = cartesian coordinates
anything else = reciprocal coordinates
if n <= 0
M,m,G,g for Monkhorst-Pack or Gamma grid
anything else is a special case
line 4: if n <= 0, the Monkhorst-Pack grid
if n > 0, then a line per kpoint
line 5: if n <=0 it is the gamma shift
After the kpts there may be tetrahedra, but we do not support that
for now.
"""
import numpy as np
if fname is None:
fname = os.path.join(directory, 'KPOINTS')
p = self.input_params
kpts = p.get('kpts', None) # this is a list, or None
# kpts_weight = p.get("kpts_weight", None) # weights of the kpoints for BS
if kpts is None:
NKPTS = None
elif len(np.array(kpts).shape) == 1:
NKPTS = 0 # automatic
else:
NKPTS = len(p['kpts'])
# figure out the mode
if NKPTS == 0 and not p.get('gamma', None):
MODE = 'm' # automatic monkhorst-pack
elif NKPTS == 0 and p.get('gamma', None):
MODE = 'g' # automatic gamma monkhorst pack
# we did not trigger automatic kpoints
elif p.get('kpts_nintersections', None) is not None:
MODE = 'l'
elif p.get('reciprocal', None) is True:
MODE = 'r'
else:
MODE = 'c'
with open(fname, 'w') as f:
# line 1 - comment
comm = 'KPOINTS created by Atomic Simulation Environment\n'
if p.get("kpath", None) is not None:
comm = "KPATH: {} \n".format(p.get("kpath", None))
f.write(comm)
# line 2 - number of kpts
if MODE in ['c', 'k', 'm', 'g', 'r']:
f.write('{}\n'.format(NKPTS))
elif MODE in ['l']: # line mode, default intersections is 10
f.write('{}\n'.format(p.get('kpts_nintersections')))
# line 3
if MODE in ['m', 'g', 'l']:
if MODE == 'm':
f.write('Monkhorst-Pack\n') # line 3
elif MODE == 'g':
f.write('Gamma\n')
else:
f.write("Line mode\n")
elif MODE in ['c', 'k']:
f.write('Cartesian\n')
else:
f.write('Reciprocal\n')
# line 4
if MODE in ['m', 'g']:
f.write('{0:<9} {1:<9} {2:<9}\n'.format(*p.get('kpts', (1, 1, 1))))
elif MODE in ['c', 'k', 'r']:
for n in range(NKPTS):
# I assume you know to provide the weights
f.write('{0:<9} {1:<9} {2:<9} {3:<4}\n'.format(*p['kpts'][n]))
elif MODE in ['l']:
if p.get('reciprocal', None) is False:
f.write('Cartesian\n')
else:
f.write('Reciprocal\n')
for n in range(NKPTS):
f.write('{0:<9} {1:<9} {2:<9} 1\n'.format(*p['kpts'][n]))
# line 5 - only if we are in automatic mode
if MODE in ['m', 'g']:
if p.get('gamma', None):
f.write('{0:<9} {1:<9} {2:<9}\n'.format(*p['gamma']))
else:
f.write('0.0 0.0 0.0\n')
# Patch method for getting the atoms from a previous calculation
def read_atoms_sorted(path=""):
f_sort = os.path.join(path, 'ase-sort.dat')
f_contcar = os.path.join(path, "CONTCAR")
if os.path.isfile(f_sort):
sort = []
resort = []
line = None
with open(f_sort, 'r') as dat_sort:
lines = dat_sort.readlines()
for line in lines:
data = line.split()
sort.append(int(data[0]))
resort.append(int(data[1]))
atoms = read(f_contcar, format='vasp')[resort]
else:
atoms = read(f_contcar, format='vasp')
return atoms
# Hot patch to the Vasp class
Vasp.read_bandgap = _read_bandgap
Vasp.load_vasprun = _load_vasprun
Vasp.read_extern_stress = _read_extern_stress
Vasp.copy_files = _copy_files
Vasp.read_final_E = _get_final_E
Vasp.write_kpoints = _write_kpoints
Vasp.run = _run
# Add missing keys
bool_keys += ["lusew",
"ladder",
"lhartree",
"lpead",
"lvdwexpansion",
"lorbitalreal"]
int_keys += ["antires",
"omegamax",
]
# Patching the vasprun.xml for BSE calculations
@property
def _converged_electronic(self):
"""
Checks that electronic step convergence has been reached in the final
ionic step
"""
try:
final_esteps = self.ionic_steps[-1]["electronic_steps"]
except IndexError:
return False # no actual ionic steps
if 'LEPSILON' in self.incar and self.incar['LEPSILON']:
i = 1
to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def optical_transitions(self):
# Get optical transitions of BSE calculation
from xml.etree import ElementTree as ET
import numpy
ep = None
for event, elem in ET.iterparse(self.filename):
if ("name" in elem.attrib) and (elem.attrib["name"] == "opticaltransitions"):
ep = elem
break
ot_array = []
for v in ep:
# print(v)
ot_array.append(list(map(float, v.text.strip().split())))
ot_array = numpy.array(ot_array)
return ot_array
def distance(a, b, lattice=[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]):
if len(a) != len(b):
raise ValueError("a and b should be of same dimension!")
a_ = [sum([lattice[i][j] * a[i] for i in range(len(a))]) for j in range(len(lattice))]
b_ = [sum([lattice[i][j] * b[i] for i in range(len(b))]) for j in range(len(lattice))]
par_dis = [(a_[i] - b_[i]) ** 2 for i in range(len(a))]
return sum(par_dis) ** 0.5
def is_on_path(p, kpath, eps=1e-6,
lattice=[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]):
# kpath is a list of points
flag = False
tot_dis = 0
for i in range(len(kpath) - 1):
start = kpath[i]
end = kpath[i + 1]
# print(start, end)
# print(distance(start, p),
# distance(p, end),
# distance(start, end))
if abs(distance(start, p) + distance(p, end) \
- distance(start, end)) < eps:
flag = flag or True
tot_dis = tot_dis + distance(start, p,
lattice=lattice)
if flag is True:
return flag, tot_dis
else:
tot_dis = tot_dis + distance(start, end,
lattice=lattice)
return flag, tot_dis
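# Quick check (sketch): a point halfway between Gamma and X lies on the path,
# using the default identity lattice:
#   on_path, dist = is_on_path([0.25, 0.0, 0.0], [[0, 0, 0], [0.5, 0.0, 0.0]])
#   -> on_path is True, dist is 0.25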
def get_distance_nodes(kpath,
lattice=[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]):
res = []
tot_dis = 0
res.append(0)
for i in range(len(kpath) - 1):
tot_dis = tot_dis + distance(kpath[i], kpath[i + 1],
lattice=lattice)
res.append(tot_dis)
return res
# Get the eigenvalues from the k-points in the kpath
def get_bands_along_path(self,
kpath=None,
lattice_type=None):
if (kpath is None):
return self.eigenvalues[Spin.up]
elif lattice_type is not None:
path_nodes = gen_line_path(kpath,
lattice_type,
n_int=0)
print(path_nodes)
eig = self.eigenvalues[Spin.up]
n_bands = eig.shape[1]
kpts_path = []
line_distance = []
energies = [[] for i in range(n_bands)]
cbm_kpt = None
cbm_e = 1e4
vbm_kpt = None
vbm_e = -1e4
lat_rec = self.lattice_rec.matrix
# Generate the valid kpoints list
# Get new bandgap
for i in range(len(self.actual_kpoints)):
kpt = self.actual_kpoints[i]
# assert is_on_path(kpt, path_nodes) is True
on_path, dist = is_on_path(kpt, path_nodes,
lattice=lat_rec)
if on_path:
kpts_path.append(kpt)
line_distance.append(dist)
energy_occu = eig[i]
prev_occu = 1
prev_e = -1e4
for j in range(len(energy_occu)):
e, occu = energy_occu[j]
energies[j].append(e)
if (prev_occu > 0) and (occu == 0): # The cbm
if prev_e > vbm_e:
vbm_e = prev_e
vbm_kpt = kpt
if e < cbm_e:
cbm_e = e
cbm_kpt = kpt
prev_e = e
prev_occu = occu
tot_dist = max(line_distance)
print(tot_dist)
######################################################################
# CliNER - model.py #
# #
# <NAME> #
# #
# Purpose: Define the model for clinical concept extraction. #
######################################################################
import sys
from sklearn.feature_extraction import DictVectorizer
import os
import random
import math
import io
import numpy as np
from time import localtime, strftime
from collections import defaultdict
from notes.documents import labels as tag2id, id2tag
from tools import flatten, save_list_structure, reconstruct_list
from tools import print_str, print_vec, print_files, write
import DatasetCliner_experimental as Exp
import tensorflow as tf
import entity_lstm as entity_model
import training_predict_LSTM
import pickle
import copy
import helper_dataset as hd
import shutil
class ClinerModel:
def log(self, out, model_file=None):
'''
ClinerModel::log()
Log training information of model.
@param out. Either a filename or file channel to output the log string.
@param model_file. A path to optionally identify where the model was saved.
@return None
'''
if not self._log:
log = self.__log_str(model_file)
else:
log = self._log
# depending on whether it is already opened as a channel
if isinstance(out,type(sys.stdout)):
write(out, '%s\n' % log)
else:
with open(out, 'a') as f:
write(f, '%s\n' % log)
def __log_str_NEURAL(self,model_file=None):
""
def __log_str(self, model_file=None):
'''
ClinerModel::__log_str()
Build a string of information about training for the model's log file.
@param model_file. A path to optionally identify where the model was saved.
@return A string of the model's training information
'''
assert self._is_trained, 'ClinerModel not trained'
with io.StringIO() as f:
write(f, u'\n')
write(f, '-'*40)
write(f, u'\n\n')
if model_file:
write(f, 'model : %s\n' % os.path.abspath(model_file))
write(f, u'\n')
if self._use_lstm:
write(f, u'modeltype: LSTM\n')
else:
write(f, u'modeltype: CRF\n')
if 'hyperparams' in self._score:
for name,value in self._score['hyperparams'].items():
write(f, u'\t%-10s: %s\n' % (name,value))
write(f, u'\n')
print_str(f, 'features', self._features)
write(f, u'\n')
write(f, u'\n')
write(f, 'training began: %s\n' % self._time_train_begin)
write(f, 'training ended: %s\n' % self._time_train_end)
write(f, u'\n')
write(f, u'scores\n')
print_vec(f, 'train precision', self._score['train']['precision'])
print_vec(f, 'train recall ', self._score['train']['recall' ])
print_vec(f, 'train f1 ', self._score['train']['f1' ])
write(f, self._score['train']['conf'])
if 'dev' in self._score:
print_vec(f, u'dev precision ', self._score['dev']['precision'])
print_vec(f, u'dev recall ', self._score['dev']['recall' ])
print_vec(f, u'dev f1 ', self._score['dev']['f1' ])
write(f, self._score['dev']['conf'])
if 'test' in self._score:
print_vec(f, u'test precision ', self._score['test']['precision'])
print_vec(f, u'test recall ', self._score['test']['recall' ])
print_vec(f, u'test f1 ', self._score['test']['f1' ])
write(f, self._score['test']['conf'])
if 'history' in self._score:
for label,vec in self._score['history'].items():
print_vec(f, '%-16s'%label, vec)
write(f, u'\n')
if self._training_files:
write(f, u'\n')
write(f, u'Training Files\n')
if len(self._training_files) < 200:
print_files(f, self._training_files)
else:
write(f, '\t%d files\n'%len(self._training_files))
write(f, u'\n')
write(f, u'-'*40)
write(f, u'\n\n')
# get output as full string
contents = f.getvalue()
return contents
def __init__(self, use_lstm):
"""
ClinerModel::__init__()
Instantiate a ClinerModel object.
@param use_lstm. Bool indicating whether to train a CRF or LSTM.
"""
self._use_lstm = use_lstm
self._is_trained = False
self._clf = None
self._vocab = None
self._training_files = None
self._log = None
self._text_feats = None
# Import the tools for either CRF or LSTM
if use_lstm:
# NEW
# import DatasetCliner_experimental as Exp
# import tensorflow as tf
# import entity_lstm as entity_model
# import training_predict_LSTM
# import pickle
# import copy
# import helper_dataset as hd
# import shutil
self._pretrained_dataset=None
self._pretrained_wordvectors=None
self._current_model=None
self._parameters=None
def train(self, train_notes, val=[], test=[]):
"""
ClinerModel::train()
Purpose: Train a Machine Learning model on annotated data
@param notes. A list of Note objects (containing text and annotations)
@return None
"""
# Extract formatted data
train_sents = flatten([n.getTokenizedSentences() for n in train_notes])
train_labels = flatten([n.getTokenLabels() for n in train_notes])
if test:
test_sents = flatten([n.getTokenizedSentences() for n in test])
test_labels = flatten([n.getTokenLabels() for n in test])
else:
test_sents = []
test_labels = []
if val:
print ("VAL")
val_sents = flatten([n.getTokenizedSentences() for n in val])
val_labels = flatten([n.getTokenLabels() for n in val])
self.train_fit(train_sents,train_labels,val_sents=val_sents,val_labels=val_labels,test_sents=test_sents,test_labels=test_labels)
else:
print ("NO DEV")
self.train_fit(train_sents, train_labels, dev_split=0.1,
test_sents=test_sents, test_labels=test_labels)
self._training_files = [ n.getName() for n in train_notes+val ]
def train_fit(self, train_sents, train_labels, val_sents=None, val_labels=None,
test_sents=None, test_labels=None, dev_split=None):
"""
ClinerModel::train_fit()
Purpose: Train clinical concept extraction model using annotated data.
@param train_sents. A list of sentences, where each sentence is tokenized into words.
@param train_labels. Parallel to 'train_sents', 7-way labels for concept spans.
@param val_sents. Validation data. Same format as tokenized_sents
@param val_labels. Validation data. Same format as iob_nested_labels
@param dev_split A real number from 0 to 1
"""
# metadata
self._time_train_begin = strftime("%Y-%m-%d %H:%M:%S", localtime())
# train classifier
if self._use_lstm==False:
voc, clf, dev_score, enabled_features = generic_train('all',
train_sents ,
train_labels ,
self._use_lstm ,
val_sents=val_sents ,
val_labels=val_labels ,
test_sents=test_sents ,
test_labels=test_labels ,
dev_split=dev_split )
self._is_trained = True
self._vocab = voc
self._clf = clf
self._score = dev_score
self._features = enabled_features
# metadata
self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
else:
print ("IN ERROR CHECK")
print (dev_split)
parameters,dataset,best = generic_train('all',
train_sents ,
train_labels ,
self._use_lstm ,
val_sents=val_sents ,
val_labels=val_labels ,
test_sents=test_sents ,
test_labels=test_labels ,
dev_split=dev_split )
self._is_trained = True
self.pretrained_dataset=dataset
self.parameters=parameters
self._score=best
self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
print ("BEST EPOCH")
print (best)
#self._vocab = voc
#self._clf = clf
#self._score = dev_score
#self._features = enabled_features
# metadata
#self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
def predict_classes_from_document(self, document):
"""
ClinerModel::predict_classes_from_documents()
Predict concept annotations for a given document
@param note. A Document object (containing text and annotations)
@return List of predictions
"""
# Extract formatted data
tokenized_sents = document.getTokenizedSentences()
return self.predict_classes(tokenized_sents)
def predict_classes(self, tokenized_sents):
"""
ClinerModel::predict_classes()
Predict concept annotations for unlabeled, tokenized sentences
@param tokenized_sents. A list of sentences, where each sentence is tokenized
into words
@return List of predictions
"""
hyperparams = {}
# Predict labels for prose
if self._use_lstm:
if self.parameters is None:
hyperparams['parameters'] = hd.load_parameters_from_file("LSTM_parameters.txt")
if self._pretrained_dataset is None:
temp_pretrained_dataset = os.path.join(hyperparams['parameters']['model_folder'],
"dataset.pickle")
hyperparams['pretrained_dataset'] = pickle.load(open(temp_pretrained_dataset, 'rb'))
vectorized_pred = generic_predict('all' ,
tokenized_sents ,
vocab = self._vocab ,
clf = self._clf ,
use_lstm = self._use_lstm,
hyperparams = hyperparams)
#pretrained_dataset=self._pretrained_dataset,
#tokens_to_vec=self._pretrained_wordvector,
#current_model=self._current_model,
#parameters=self.parameters)
#self._current_model=model
if self._use_lstm:
iob_pred = vectorized_pred
else:
iob_pred = [ [id2tag[p] for p in seq] for seq in vectorized_pred ]
return iob_pred
############################################################################
### Lowest-level (interfaces to ML modules) ###
############################################################################
def generic_train(p_or_n, train_sents, train_labels, use_lstm, val_sents=None, val_labels=None, test_sents=None, test_labels=None, dev_split=None):
'''
generic_train()
Train a model that works for both prose and nonprose
@param p_or_n. A string that indicates "prose", "nonprose", or "all"
@param train_sents. A list of sentences; each sentence is tokenized into words
@param train_labels. Parallel to `train_sents`, 7-way labels for concept spans
@param use_lstm Bool indicating whether to train CRF or LSTM.
@param val_sents. Validation data. Same format as train_sents
@param val_labels. Validation data. Same format as train_labels
@param dev_split. A real number from 0 to 1
'''
# Must have data to train on:
if len(train_sents) == 0:
raise Exception('Training must have %s training examples' % p_or_n)
# if you should split the data into train/dev yourself
if (not val_sents) and (dev_split > 0.0) and (len(train_sents)>10):
p = int(dev_split*100)
sys.stdout.write('\tCreating %d/%d train/dev split\n' % (100-p,p))
perm = list(range(len(train_sents)))
random.shuffle(perm)
train_sents = [ train_sents[i] for i in perm ]
train_labels = [ train_labels[i] for i in perm ]
ind = int(dev_split*len(train_sents))
val_sents = train_sents[:ind ]
train_sents = train_sents[ ind:]
val_labels = train_labels[:ind ]
train_labels = train_labels[ ind:]
else:
sys.stdout.write('\tUsing existing validation data\n')
sys.stdout.write('\tvectorizing words %s\n' % p_or_n)
if use_lstm:
print ("TESTING NEW DATSET OBJECT")
dataset = Exp.Dataset()
parameters=hd.load_parameters_from_file("LSTM_parameters.txt")
parameters['use_pretrained_model']=False
Datasets_tokens={}
Datasets_labels={}
Datasets_tokens['train']=train_sents
Datasets_labels['train']=train_labels
if val_sents!=None:
Datasets_tokens['valid']=val_sents
Datasets_labels['valid']=val_labels
if test_sents!=None:
Datasets_tokens['test']=test_sents
Datasets_labels['test']=test_labels
dataset.load_dataset(Datasets_tokens,Datasets_labels,"",parameters)
pickle.dump(dataset, open(os.path.join(parameters['model_folder'], 'dataset.pickle'), 'wb'))
print (Datasets_tokens['valid'][0])
print (Datasets_tokens['test'][0])
parameters['Feature_vector_length']=dataset.feature_vector_size
parameters['use_features_before_final_lstm']=False
parameters['learning_rate']=0.005
sess = tf.Session()
number_of_sent=list(range(len(dataset.token_indices['train'])))
with sess.as_default():
model=entity_model.EntityLSTM(dataset,parameters)
sess.run(tf.global_variables_initializer())
model.load_pretrained_token_embeddings(sess, dataset,parameters)
epoch_number = -1
transition_params_trained = np.random.rand(5+2,5+2)
values={}
values["best"]=0
f1_dictionary={}
f1_dictionary['best']=0
model_saver = tf.train.Saver(max_to_keep=100)
print ("START TRAINING")
parameters['conll_like_result_folder']='/tmp/cliner_eval_%d' % random.randint(0,256)+os.sep
test_temp = os.path.join(parameters['conll_like_result_folder'], 'test/')
train_temp = os.path.join(parameters['conll_like_result_folder'], 'train/')
valid_temp = os.path.join(parameters['conll_like_result_folder'], 'valid/')
os.mkdir(parameters['conll_like_result_folder'])
os.mkdir(test_temp)
os.mkdir(train_temp)
os.mkdir(valid_temp)
while epoch_number<90:
average_loss_per_phrase=0
accuracy_per_phase=0
step = 0
epoch_number += 1
if epoch_number != 0:
sequence_numbers=list(range(len(dataset.token_indices['train'])))
random.shuffle(sequence_numbers)
for sequence_number in sequence_numbers:
loss,accuracy,transition_params_trained=training_predict_LSTM.train_step(sess, dataset, sequence_number, model)
average_loss_per_phrase+=loss
accuracy_per_phase+=accuracy
step += 1
if step % 10 == 0:
print('Training {0:.2f}% done\n'.format(step/len(sequence_numbers)*100))
model_saver.save(sess, os.path.join(parameters['model_folder'], 'model_{0:05d}.ckpt'.format(epoch_number)))
total_loss=average_loss_per_phrase
total_accuracy=accuracy_per_phase
average_loss_per_phrase=average_loss_per_phrase/len(number_of_sent)
accuracy_per_phase=accuracy_per_phase/len(number_of_sent)
if epoch_number>0:
""
f1,predictions=training_predict_LSTM.prediction_step(sess,dataset,"test",model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
f1_train,_=training_predict_LSTM.prediction_step(sess,dataset,"train", model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
f1_valid,_=training_predict_LSTM.prediction_step(sess,dataset,"valid", model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
correctly_predicted_tokens=training_predict_LSTM.compute_train_accuracy(parameters['conll_like_result_folder']+"valid"+os.sep+"epoche_"+str(epoch_number)+".txt")
if f1_dictionary['best']<float(f1_valid):
f1_dictionary['epoche']=epoch_number
f1_dictionary['best']=float(f1_valid)
if values["best"]<correctly_predicted_tokens:
values["epoche"]=epoch_number
# File: Stack Overflow survey dashboard app (repo albarrom/GII_O_MA_21.05)
import pandas as pd
import plotly.express as px
import dash
from dash import Dash, dcc, html, Input, Output, State
import dash_bootstrap_components as dbc
import numpy as np
# create a dataframe with all the survey information
df21 = pd.read_csv ('survey/survey_results_public2021.csv', index_col = [0]) # the index will be the column with the response ID
# create a dataframe with all the survey information
df20 = pd.read_csv ('survey/survey_results_public2020.csv', index_col = [0]) # the index will be the column with the response ID
df20 # show the df (notebook-style preview; has no effect in a script)
# create a new df copying only the Age1stCode column
df1 = df21[['Age1stCode']]
# normalize all the data
df1 = df1[df1['Age1stCode'].notna()] # drop the nulls
df1.loc[df1["Age1stCode"] == "Younger than 5 years", "Age1stCode"] = "04 - 04 years" # there is already a "05" range in the df
df1.loc[df1["Age1stCode"] == "Older than 64 years", "Age1stCode"] = "65 - 65 years" # keep the same range format as the rest
df1.loc[df1["Age1stCode"] == "5 - 10 years", "Age1stCode"] = "05 - 10 years"
df3 = crime_year = pd.DataFrame(df1['Age1stCode'].value_counts().reset_index().values, columns=["RangoEdad", "count"])
# first select the digits of the string (the df column is a string) and convert the result to an integer
df3["min"] = df3.RangoEdad.astype(str).str[:2].astype(int) # the minimum age of the range is the first number
# rename the new ranges
df3.loc[df3["RangoEdad"] == "04 - 04 years", "RangoEdad"] = "Younger than 5 years" # restore the original label
df3.loc[df3["RangoEdad"] == "65 - 65 years", "RangoEdad"] = "Older than 64 years" # restore the original label
df3["csv"]=2020 # add a column to distinguish the csv
# add a column to distinguish the csv (this is the 2021 survey)
df3["csv"] = 2021
# sort the df data
df3.set_index('min',inplace=True)
df2 = df20[['Age1stCode']]
# normalize all the data
df2 = df2[df2['Age1stCode'].notna()] # drop the nulls
df2.loc[df2["Age1stCode"] == "Younger than 5 years", "Age1stCode"] = "4" # make the value numeric
df2.loc[df2["Age1stCode"] == "Older than 85", "Age1stCode"] = "86"
df2['Age1stCode'] = df2.Age1stCode.astype(int) # the whole column is now integers
# since the age cut-offs differ between the two surveys, bins are created to split the data the same way
bins = [1, 5, 10, 18, 25, 35, 45, 55, 65, 75, 85, 86]
df4 = pd.DataFrame(df2['Age1stCode'].value_counts(bins= bins, sort=False).reset_index().values, columns=["Rango", "count"])
df4["min"] = df4.Rango.astype(str).str[6:9].astype(str) # the minimum age of the range is the first number
df4.loc[df4["min"] == ", 5", "min"] = "5"
df4.loc[df4["min"] == "10.", "min"] = "10"
df4["min"] = df4["min"].astype(int)
df4["csv"]=2020 # add a column to distinguish the csv
df4["RangoEdad"] = ["Younger than 5 years", "05 - 10 years",
"11 - 17 years","18 - 24 years",
"25 - 34 years", "35 - 44 years",
"45 - 54 years","55 - 64 years",
"65 - 74 years","75 - 84 years",
"Older than 85 years"]
# make a copy of the df
df= df21.copy()
# normalize all the data
df.loc[df["MainBranch"] == "None of these", "MainBranch"] = "Other"
df= df.groupby(['MainBranch',],as_index=False).size()
# the % = value*100 / total
df['porcentaje'] = 100 *df['size']/ df['size'].sum()
# Initialise the app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP],
meta_tags=[{'name': 'viewport', # makes the page responsive on mobile
'content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5,'}]
)
server = app.server #heroku
# styling the sidebar
# padding for the page content
CONTENT_STYLE = {
"margin-left": "2rem",
"margin-right": "2rem",
"padding": "2rem 2rem",
#,
}
FOOTER_STYLE = {
"margin-left": "0rem",
"margin-right": "0rem",
"margin-bottom": "0rem",
"margin-top": "0rem",
#"padding": "2rem 1rem",
"background-color": "#f8f9fa",
#"width": "calc(100% - 2rem)",
#"fluid": "True",
}
app.layout = html.Div([
dbc.Navbar([
dbc.Row([ #logo
dbc.Col(html.Img(src="https://appharbor.com/assets/images/stackoverflow-logo.png",
height="35px",className='text-end ms-5')),
dbc.Col(dbc.NavbarBrand("Dashboard", className='text-start')),
#logo
dbc.Col([ #col
dbc.DropdownMenu(
children=[
# IMPORTANT: to navigate within the same page:
# 1. create id tags in the html components (as if they were going to be used in a callback)
# 2. in the navbar, add # in front of the href to reference that id
# 3. set external_link=True in the navbar so it works on the same page
dbc.DropdownMenuItem("Titulo 1", href="#uno", external_link=True),
dbc.DropdownMenuItem("Titulo 2", href="#dos", external_link=True),
],
nav=True,
in_navbar=True,
label="Menu",
className="position-absolute top-0 end-0",
) #dropdown
]) #col
])
], #logo
sticky="top",
),
# font colors, docs: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/alert/
# text-primary: blue
# text-secondary: grey
# text-warning: yellow
# text-success: green
# text-info: light blue
# important: these colors are only valid for the bootstrap theme. Cyan has another scheme, see docs
dbc.Row([ # content
dbc.Row([
dbc.Col(html.H1("Titulo 1", id = "uno", className="text-center"))
], justify="center",
style={'color': 'LightBlue'},
), # header
dbc.Row([
dbc.Col([
html.H2('Age - histogram'),
html.P('Different age groups and their recurrence'),
dcc.Graph(id='primero', figure={}),
]#, xs=5, sm=6, md=7, lg=8, xl=10
)
], justify="center"
), # first chart
dbc.Row([
dbc.Col([
dcc.Dropdown(id="select_opt", multi=False, value=2021,
options=[ # the user will see the labels
{"label": "2021", "value": 2021},
{"label": "2020", "value": 2020}],
),
]#, xs=5, sm=6, md=7, lg=8, xl=10
)
]), # dropdown. To be removed
dbc.Row([
dbc.Col([
html.H2('Main Branch - Pie chart'),
html.P('Correlation between software development and stackoverflow users'),
dcc.Graph(id='segundo', figure={})
]#, xs=5, sm=6, md=7, lg=8, xl=10
)
], justify="center"
), # second chart
dbc.Row([
dbc.Col([
html.H2('Main Branch - Bar char'),
html.P('Correlation between software development and stackoverflow users.'),
dcc.Graph(id='tercero', figure={})
]#, xs=5, sm=6, md=7, lg=8, xl=10
#, brand_href="uno",
)
], justify="center",
), # third chart
# new title
dbc.Row([
dbc.Col(html.H1("Titulo dos", id = "dos", className="text-center"))
], justify="center",
style={'color': 'LightBlue'},
), # title
dbc.Row([
dbc.Col([
html.H2('Nuevo grafico aqui'),
html.P('Correlation between software development and stackoverflow users.'),
#dcc.Graph(id='tercero', figure={})
]#, xs=5, sm=6, md=7, lg=8, xl=10
#, brand_href="uno",
)
], justify="center",
), # third chart placeholder
],style=CONTENT_STYLE,
#fluid=True # make the chart fit the page width
), # content
dbc.Row([ #footer
dbc.Col([ # Text
dbc.Col(html.H4("Info", className="text-center mb-4 d-none d-lg-block")), #mb: margin bottom
dbc.Col(html.P("Stackoverflow survey data has been used to create this dashboard.",
className="ms-5 me-5 d-none d-lg-block")),
# To create this dashboard, data from Stackoverflow surveys have been used.
],className= "border-end",
#"d-flex justify-content-center justify-content-lg-between p-4 border-bottom",
), # info text column
dbc.Col([ # made with; ms: margin start, so it looks nice
dbc.Col(html.H4("Made with ", className="text-start mb-4 ms-5 d-none d-lg-block")),
html.A(#className='text-center text-primary mb-4'
dbc.Col(html.P("Dash", className='text-start text-secondary ms-5')),
# dbc.Col(html.Img(src="https://logonoid.com/images/stack-overflow-logo.png", height="30px")),
href="https://dash.plotly.com/",
style={"textDecoration": "none"},
), #link github
html.A(#className='text-center text-primary mb-4'
dbc.Col(html.P("Heroku", className='text-start text-secondary ms-5')),
# dbc.Col(html.Img(src="https://logonoid.com/images/stack-overflow-logo.png", height="30px")),
href="https://devcenter.heroku.com/categories/reference",
style={"textDecoration": "none"},
), #link heroku
]#,className="d-flex justify-content-center justify-content-lg-between p-4 border-bottom",
), # Made with
dbc.Col([ # links
dbc.Col(html.H4("Interesting links",className="text-start mb-4 ms-5 d-none d-lg-block")),
#
html.A(#className='text-center text-primary mb-4'
dbc.Col(html.P("Stackoverflow dashboard 2021", className='text-start text-secondary ms-5')),
href="https://insights.stackoverflow.com/survey/2021",
style={"textDecoration": "none"},
), #link stack
html.A(#className='text-center text-primary mb-4'
dbc.Col(html.P("Stackoverflow survey", className='text-start text-secondary ms-5')),
# dbc.Col(html.Img(src="https://logonoid.com/images/stack-overflow-logo.png", height="30px")),
href="https://insights.stackoverflow.com/survey?_ga=2.189292843.1285052511.1645528337-438523718.1645528337",
style={"textDecoration": "none"},
), #link stack
]), #links
dbc.Col([ # social links
dbc.Col(html.H4("Contact ", className="text-start mb-5 ms-5 d-none d-lg-block")),
html.A(#className='text-center text-primary mb-4'
dbc.Col(html.P("Github", className='text-start text-secondary ms-5')),
# dbc.Col(html.Img(src="https://logonoid.com/images/stack-overflow-logo.png", height="30px")),
href="https://github.com/albarrom",
style={"textDecoration": "none"},
), #link github
]#,className="d-flex justify-content-center justify-content-lg-between p-4 border-bottom",
), # social links
html.Hr(), # decorative divider
dbc.Row([ # last line
dbc.Col(html.P("2022 TFG - GII_O_MA_21.05", className="text-center")),
])# last line
], className="text-secondary", # make the text grey
style=FOOTER_STYLE,
)#footer
]) #layout
@app.callback(
Output(component_id='primero', component_property='figure'),
Input(component_id='select_opt', component_property='value'))
def update_graph(option_slctd):
#filtered_df = df[df.year == selected_year]
cfg = [("x", "RangoEdad"), ("y", "count")]
if (option_slctd == 2021):
fig = px.histogram(df3, **{ax: col for ax, col in cfg},
category_orders={'RangoEdad':["Younger than 5 years",
"05 - 10 years", "11 - 17 years", "18 - 24 years", "25 - 34 years", "35 - 44 years",
"45 - 54 years", "55 - 64 years", "Older than 64 years"]},
labels={"count":"# Responses", "RangoEdad":"Age range"})
# 'ggplot2', 'seaborn', 'simple_white', 'plotly',
# 'plotly_white', 'plotly_dark', 'presentation',
# 'xgridoff', 'ygridoff', 'gridon', 'none')
# category_orders={'year':
# force a specific ordering of values per column
# [2013,2012,2011,2010,2009,2008,2007,2006,2005,2004,2003,2002,2001]},)
else: fig = px.histogram(df4, **{ax: col for ax, col in cfg})
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)','paper_bgcolor': 'rgba(0, 0, 0, 0)',}) # transparent background
fig.update_layout(xaxis=dict(showgrid=False), yaxis=dict(showgrid=False)) # remove the grid
return fig
@app.callback( # pie chart
Output(component_id='segundo', component_property='figure'),
Input(component_id='select_opt', component_property='value'))
def update_graph(optionse):
fig=px.pie(data_frame=df, names=df['MainBranch'], values = df['size'],hole=.3,)
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)','paper_bgcolor': 'rgba(0, 0, 0, 0)',})
return fig
@app.callback( # bar chart
Output(component_id='tercero', component_property='figure'),
Input(component_id='select_opt', component_property='value'))
def update_graph(optionse):
fig= px.bar(df, x= df['size'],
y=df['MainBranch'],
orientation = "h", # orientacion "h"/"v"
#!/usr/bin/env python3
"""
cifar10.py
CNN for CIFAR-10 dataset
<NAME>
06/30/2020
References:
https://keras.io/getting_started/intro_to_keras_for_engineers/
https://keras.io/guides/training_with_built_in_methods/#api-overview-a-first-endtoend-example
https://keras.io/api/
"""
import numpy as np
import time
import math
import os
import sys
import argparse
import matplotlib.pyplot as plt
from datetime import datetime
from pprint import pprint
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras.utils as np_utils
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.constraints import max_norm
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers import (
Activation,
Dense,
Dropout,
Flatten,
BatchNormalization,
)
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers.schedules import (
ExponentialDecay,
InverseTimeDecay,
PolynomialDecay,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
epochs = 200
batch_size = 128
# epochs = 20
# batch_size = 64
# def lr_schedule(epoch):
#     lrate = 0.001
#     # check the larger threshold first so that the 0.0003 rate is reachable
#     if epoch > 100:
#         lrate = 0.0003
#     elif epoch > 75:
#         lrate = 0.0005
#     return lrate
def show_images():
"""
Show samples from each class
"""
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
num_train, img_channels, img_rows, img_cols = X_train.shape
num_test, _, _, _ = X_test.shape
num_classes = len(np.unique(y_train))
# If using tensorflow, set image dimensions order
if K.backend() == "tensorflow":
K.common.set_image_dim_ordering("th")
class_names = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
fig = plt.figure(figsize=(8, 3))
for i in range(num_classes):
ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])
idx = np.where(y_train[:] == i)[0]
x_idx = X_train[idx, ::]
img_num = np.random.randint(x_idx.shape[0])
im = np.transpose(x_idx[img_num, ::], (1, 2, 0))
ax.set_title(class_names[i])
plt.imshow(im)
plt.show()
def elapsed_time(start, end):
"""
Compute elapsed time.
@param start: start time
@param end: end time
@return: elapsed time (string)
"""
diff = end - start
days, hours, minutes = 0, 0, 0
s_time = ""
if diff > 86400: # day
days = math.floor(diff / 86400)
diff = diff - days * 86400
if diff > 3600: # hour
hours = math.floor(diff / 3600)
diff = diff - hours * 3600
if diff > 60: # minute
minutes = math.floor(diff / 60)
diff = diff - minutes * 60
if days > 0:
s_time = "{0} days {1} hrs {2} min {3:.4f} sec".format(days, hours, minutes, diff)
# print(f"{days} days {hours} hrs {minutes} min {diff:.4f} sec")
elif hours > 0:
s_time = "{0} hrs {1} min {2:.4f} sec".format(hours, minutes, diff)
# print(f"{hours} hrs {minutes} min {diff:.4f} sec")
elif minutes > 0:
s_time = "{0} min {1:.4f} sec".format(minutes, diff)
# print(f"{minutes} min {diff:.4f} sec")
else:
s_time = "{0:.4f} sec".format(diff)
# print(f"{diff: .4f} sec")
return s_time
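# Hedged usage note (added; not part of the original file): elapsed_time()
# expects start/end values from time.time() and returns a human-readable
# string, e.g. elapsed_time(0, 3665.0) -> "1 hrs 1 min 5.0000 sec".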
def timestamp():
"""
Compute timestamp
@return:
"""
# Calling now() function
today = datetime.now()
s_timestamp = "{0}{1:02d}{02:02d}-{3:02d}{4:02d}{5:02d}".format(
today.year, today.month, today.day, today.hour, today.minute, today.second
)
return s_timestamp
def accuracy(test_x, test_y, model):
"""
Compute test accuracy
@param test_x:
@param test_y:
@param model:
@return:
"""
result = model.predict(test_x)
predicted_class = np.argmax(result, axis=1)
true_class = np.argmax(test_y, axis=1)
num_correct = np.sum(predicted_class == true_class)
accuracy = float(num_correct) / result.shape[0]
return accuracy * 100
def plot_model_history(model_history):
"""
Plot model accuracy and loss
@param model_history:
@return:
"""
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
# Summarize history for accuracy
axs[0].plot(
range(1, len(model_history.history["acc"]) + 1), model_history.history["acc"]
)
axs[0].plot(
range(1, len(model_history.history["val_acc"]) + 1),
model_history.history["val_acc"],
)
axs[0].set_title("Model Accuracy")
axs[0].set_ylabel("Accuracy")
axs[0].set_xlabel("Epoch")
axs[0].set_xticks(  # one tick roughly every len/10 epochs
np.arange(1, len(model_history.history["acc"]) + 1,
max(1, len(model_history.history["acc"]) // 10))
)
axs[0].legend(["train", "val"], loc="best")
# Summarize history for loss
axs[1].plot(
range(1, len(model_history.history["loss"]) + 1), model_history.history["loss"]
)
axs[1].plot(
range(1, len(model_history.history["val_loss"]) + 1),
model_history.history["val_loss"],
)
axs[1].set_title("Model Loss")
axs[1].set_ylabel("Loss")
axs[1].set_xlabel("Epoch")
axs[1].set_xticks(  # one tick roughly every len/10 epochs
np.arange(1, len(model_history.history["loss"]) + 1,
max(1, len(model_history.history["loss"]) // 10))
)
axs[1].legend(["train", "val"], loc="best")
file_name = "model-history-" + timestamp() + ".png"
plt.savefig(file_name) # Save plot to file
# plt.show() # Show plot
plt.clf() # Clear current figure
plt.close(fig)
def load_data():
# Load data from file
npzfile = np.load("cifar10.npz")
# print(npzfile.files)
X_train = npzfile["X_train"]
X_valid = npzfile["X_valid"]
X_test = npzfile["X_test"]
y_train = npzfile["y_train_hot"]
y_valid = npzfile["y_valid_hot"]
y_test = npzfile["y_test_hot"]
num_train, img_channels, img_rows, img_cols = X_train.shape
num_test, _, _, _ = X_test.shape
num_classes = y_train.shape[1]
# num_classes = len(np.unique(y_train))
print("load_data:")
print("X_train.shape:", num_train, img_channels, img_rows, img_cols)
print()
# Reduce datasets to improve performance
num_records = 2000
X_train = X_train[:num_records]
X_valid = X_valid[:num_records]
X_test = X_test[:num_records]
y_train = y_train[:num_records]
y_valid = y_valid[:num_records]
y_test = y_test[:num_records]
# num_classes = y_train.shape[1]
# print(f"num_classes = {num_classes}")
# print(f"X_test shape: {X_valid.shape}")
# print(f"y_test shape: {y_valid.shape}")
return (X_train, y_train), (X_valid, y_valid), (X_test, y_test)
def load_data_linux():
# Create train/test/validation split
# We want to split our dataset into separate training and test datasets
# We use the training dataset to fit the model and the test dataset to evaluate
# its performance to generalize to unseen data.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.5)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_valid = X_valid.astype("float32")
# Standardize the columns
# We need to standardize the columns before we feed them to a linear classifier,
# but if the X values are in the range 0-255 then we can transform them to [0,1].
X_train = X_train / 255.0
X_test = X_test / 255.0
X_valid = X_valid / 255.0
# One-hot encoding
# Represent each integer value as a binary vector that is all zeros
# except the index of the integer.
y_train_hot = np_utils.to_categorical(y_train)
y_test_hot = np_utils.to_categorical(y_test)
y_valid_hot = np_utils.to_categorical(y_valid)
print(f"\nnum_classes = {y_train_hot.shape[1]}")
print(X_train.shape[0], "Train samples")
print(X_valid.shape[0], "Validation samples")
print(X_test.shape[0], "Test samples")
print("\nX_train shape:", X_train.shape)
print("X_valid shape:", X_valid.shape)
print("X_test shape:", X_test.shape)
print("y_train shape:", y_train.shape)
print("y_valid shape:", y_valid.shape)
print("y_test shape:", y_test.shape)
print("\ny_train_hot shape:", y_train_hot.shape)
print("y_valid_hot shape:", y_valid_hot.shape)
print("y_test_hot shape:", y_test_hot.shape)
# Data Sanity Check
print(f"\nnum_classes = {y_test_hot.shape[1]}")
print(f"X_test shape: {X_test.shape}")
print(f"y_test:\n{y_test[0:10]}") # Check that dataset has been randomized
return (X_train, y_train), (X_valid, y_valid), (X_test, y_test)
# def load_data_linux():
# # Load data from file
# npzfile = np.load('cifar10.npz')
# # print(npzfile.files)
#
# X_train = npzfile['X_train']
# X_valid = npzfile['X_valid']
# X_test = npzfile['X_test']
#
# y_train = npzfile['y_train_hot']
# y_valid = npzfile['y_valid_hot']
# y_test = npzfile['y_test_hot']
#
# # Reduce datasets to improve performance
# # num_records = 2000
# # X_train = X_train[:num_records]
# # X_valid = X_valid[:num_records]
# # X_test = X_test[:num_records]
# #
# # y_train = y_train[:num_records]
# # y_valid = y_valid[:num_records]
# # y_test = y_test[:num_records]
#
# num_train, img_rows, img_cols, img_channels = X_train.shape
# num_test, _, _, _ = X_test.shape
# num_classes = y_train.shape[1]
# # num_classes = len(np.unique(y_train))
# print("X_train.shape:", num_train, img_channels, img_rows, img_cols)
# print()
#
# # Convert from NCHW to NHWC
# @tf.function
# def transform(x):
# y = tf.transpose(x, [0, 3, 1, 2])
# return y
#
# X_train = transform(X_train)
# X_valid = transform(X_valid)
# X_test = transform(X_test)
# print("After transform:")
# print("X_train.shape:", X_train.get_shape()) # the shape of out is [2000, 32, 32, 3]
# print("X_valid.shape:", X_valid.get_shape())
# print("X_test.shape:", X_test.get_shape())
#
# return (X_train, y_train), (X_valid, y_valid), (X_test, y_test)
def preprocess():
"""
Data pre-processing
@return:
"""
# Create train/test/validation split
# We want to split our dataset into separate training and test datasets
# We use the training dataset to fit the model and the test dataset to evaluate
# its performance to generalize to unseen data.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.5)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_valid = X_valid.astype("float32")
# Standardize the columns
# We need to standardize the columns before we feed them to a linear classifier,
# but if the X values are in the range 0-255 then we can transform them to [0,1].
X_train = X_train / 255.0
X_test = X_test / 255.0
X_valid = X_valid / 255.0
# One-hot encoding
# Represent each integer value as a binary vector that is all zeros
# except the index of the integer.
y_train_hot = np_utils.to_categorical(y_train)
y_test_hot = np_utils.to_categorical(y_test)
y_valid_hot = np_utils.to_categorical(y_valid)
# y_categories = np.unique(y_train)
print(f"\nnum_classes = {y_train_hot.shape[1]}")
print(X_train.shape[0], "Train samples")
print(X_valid.shape[0], "Validation samples")
print(X_test.shape[0], "Test samples")
print("\nX_train shape:", X_train.shape)
print("X_valid shape:", X_valid.shape)
print("X_test shape:", X_test.shape)
print("y_train shape:", y_train.shape)
print("y_valid shape:", y_valid.shape)
print("y_test shape:", y_test.shape)
print("\ny_train_hot shape:", y_train_hot.shape)
print("y_valid_hot shape:", y_valid_hot.shape)
print("y_test_hot shape:", y_test_hot.shape)
# Data Sanity Check
print(f"\nnum_classes = {y_test_hot.shape[1]}")
print(f"X_test shape: {X_test.shape}")
print(f"y_test:\n{y_test[0:10]}") # Check that dataset has been randomized
# Save datasets to file
np.savez(
"cifar10_test.npz",
X_train=X_train,
X_valid=X_valid,
X_test=X_test,
y_train=y_train,
y_valid=y_valid,
y_test=y_test,
y_train_hot=y_train_hot,
y_valid_hot=y_valid_hot,
y_test_hot=y_test_hot,
)
def create_model(name, num_classes):
"""
Create model for 70% accuracy
The parameters needed to be different to run on linux.
@return:
"""
# epochs = 200
# batch_size = 128
weight_decay = 1e-4
# We expect our inputs
# pya/pya.py (from the m---w/pya repository)
from .Aserver import Aserver
import numbers
from itertools import compress
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
import scipy.signal
from scipy.fftpack import fft, fftfreq, ifft
from scipy.io import wavfile
from .helpers import ampdb, dbamp, linlin, timeit, spectrum
import logging
from copy import copy, deepcopy
_LOGGER = logging.getLogger(__name__)
_LOGGER.addHandler(logging.NullHandler())
class Asig:
"""Audio signal class. Asig enables manipulation of audio signals in the style of numpy and more.
Asig offer functions for plotting (via matplotlib) and playing audio (using the pya.Aserver class)
Attributes
----------
sig : numpy.array
Array for the audio signal. Can be mono or multichannel.
sr : int
Sampling rate
label : str
A string label to give the object a unique identifier.
channels : int
Number of channels
cn : list of str, None
cn, short for channel names, is a list of strings of size channels
that gives each channel a unique name.
Channel names can be used to subset signal channels in a more readable way,
e.g. asig[:, ['left', 'front']] subsets the left and front channels of the signal.
mix_mode : str or None
used to extend the numpy __setitem__() operation to frequent audio manipulations such as
mixing, extending, clipping, replacing. Asig currently supports the mix_modes:
bound, extend, overwrite. mix_mode should not be set directly but is set temporarily when using
the .bound, .extend and .overwrite properties.
"""
def __init__(self, sig, sr=44100, label="", channels=1, cn=None):
"""__init__ method
Parameters
----------
sig : numpy.array or int or float or str
numpy.array for an audio signal, str for a file path, int creates x samples of silence,
float creates x seconds of silence.
sr : int
Sampling rate
label : str
Label for the object
channels : int
Number of channels, no need to set it if you already have a signal for the sig argument.
cn : list or None
A list of channel names, size should match the channels.
"""
self.sr = sr
self.mix_mode = None
self._ = {} # dictionary for further return values
if isinstance(sig, str):
self.load_wavfile(sig)
elif isinstance(sig, int): # sample length
if channels == 1:
self.sig = np.zeros(sig).astype("float32")
else:
self.sig = np.zeros((sig, channels)).astype("float32")
elif isinstance(sig, float): # if float interpret as duration
if channels == 1:
self.sig = np.zeros(int(sig * sr)).astype("float32")
else:
self.sig = np.zeros((int(sig * sr), channels)).astype("float32")
else:
self.sig = np.array(sig).astype("float32")
self.label = label
self.cn = cn
self._set_col_names()
@property
def channels(self):
"""Channel property"""
try:
return self.sig.shape[1]
except IndexError:
return 1
@property
def samples(self):
"""Return the length of signal in samples"""
return np.shape(self.sig)[0] # Update it.
@property
def cn(self):
"""Channel names getter"""
return self._cn
@cn.setter
def cn(self, val):
"""Channel names setter"""
if val is None:
self._cn = None
else:
if len(val) == self.channels:
if all(isinstance(x, str) for x in val): # check if all elements are str
self._cn = val
else:
raise TypeError("channel names cn need to be a list of string(s).")
else:
raise ValueError("list size doesn't match channel numbers {}".format(self.channels))
def load_wavfile(self, fname):
"""Load .wav file, and set self.sig to the signal and self.sr to the sampling rate.
Parameters
----------
fname : str
Path to file. .wav format is currently the only supported format. Will add more later.
"""
self.sr, self.sig = wavfile.read(fname) # load the sample data
if self.sig.dtype == np.dtype('int16'):
self.sig = (self.sig / 32768.).astype('float32')
elif self.sig.dtype != np.dtype('float32'):
self.sig = self.sig.astype('float32')
else:
print("load_wavfile: TODO: add format")
def save_wavfile(self, fname="asig.wav", dtype='float32'):
"""Save signal as .wav file
Parameters
----------
fname : str
name of the file with .wav (Default value = "asig.wav")
dtype : str
datatype (Default value = 'float32')
Returns
-------
: Asig
"""
if dtype == 'int16':
data = (self.sig * 32767).astype('int16')
elif dtype == 'int32':
data = (self.sig * 2147483647).astype('int32')
elif dtype == 'uint8':
data = (self.sig * 127 + 128).astype('uint8')
elif dtype == 'float32':
data = self.sig.astype('float32')
else:
raise ValueError("Unsupported dtype: {}".format(dtype))  # avoid writing undefined data
wavfile.write(fname, self.sr, data)
return self
def _set_col_names(self):
# Currently, every newly returned asig has a cn argument that is the same as self.
if self.cn is None:
self.cn = [str(i) for i in range(self.channels)]
else:
if isinstance(self.cn[0], str):
self.col_name = {self.cn[i]: i for i in range(len(self.cn))}
else:
raise TypeError("column names need to be a list of strings")
def __getitem__(self, index):
""" Accessing array elements through slicing.
* int, get signal row asig[4];
* slice, range and step slicing asig[4:40:2] # from 4 to 40 every 2 samples;
* list, subset rows, asig[[2, 4, 6]] # pick out index 2, 4, 6 as a new asig
* tuple, row and column specific slicing, asig[4:40, 3:5] # from 4 to 40, channel 3 and 4
* Time slicing (unit in seconds) using dict asig[{1:2.5}, :] creates indexing of 1s to 2.5s.
* Channel name slicing: asig['l'] returns channel 'l' as a new mono asig. asig[['front', 'rear']], etc...
* bool, subset channels: asig[:, [True, False]]
Parameters
----------
index : Number or slice or list or tuple or dict
Slicing argument.
Returns
-------
a : Asig
__getitem__ returns a subset of the self based on the slicing.
"""
if isinstance(index, tuple):
_LOGGER.info(" getitem: index is tuple")
rindex = index[0]
cindex = index[1] if len(index) > 1 else None
elif isinstance(index, str):
return self._[index] # ToDo: decide whether to solve differently, e.g. only via ._[str] or via a .attribute(str) fn
else: # if only slice, list, dict, int or float given for row selection
rindex = index
cindex = None
# parse row index rindex into ridx
if isinstance(rindex, list): # e.g. a[[4,5,7,8,9]], or a[[True, False, True...]]
ridx = rindex
sr = self.sr
elif isinstance(rindex, int): # picking a single row
ridx = rindex
_LOGGER.debug("integer slicing of index: %d", ridx)
sr = self.sr
elif isinstance(rindex, slice):
_LOGGER.info(" getitem: row index is slice.")
_, _, step = rindex.indices(len(self.sig))
sr = int(self.sr / abs(step))
ridx = rindex
elif isinstance(rindex, dict): # time slicing
_LOGGER.info(" getitem: row index is dict. Time slicing.")
for key, val in rindex.items():
try:
start = int(key * self.sr)
except TypeError: # if it is None
start = None
try:
stop = int(val * self.sr)
except TypeError:
stop = None
ridx = slice(start, stop, 1)
sr = self.sr
_LOGGER.debug("Time slicing, start: %s, stop: %s", str(start), str(stop))
else: # Don't think there is a use case.
ridx = rindex
sr = self.sr
# now parse cindex
if isinstance(cindex, list):
_LOGGER.info(" getitem: column index is list.")
if isinstance(cindex[0], str):
cidx = [self.col_name.get(s) for s in cindex]
if None in cidx:
_LOGGER.error("Some of the input column names do not exist")
cn_new = [self.cn[i] for i in cidx] if self.cn is not None else None
elif isinstance(cindex[0], bool):
cidx = cindex
cn_new = list(compress(self.cn, cindex))
elif isinstance(cindex[0], int):
cidx = cindex
cn_new = [self.cn[i] for i in cindex] if self.cn is not None else None
elif isinstance(cindex, int):
_LOGGER.info(" getitem: column index is int.")
cidx = cindex
cn_new = [self.cn[cindex]] if self.cn is not None else None
elif isinstance(cindex, slice):
_LOGGER.info(" getitem: column index is slice.")
cidx = cindex
cn_new = self.cn[cindex] if self.cn is not None else None
elif isinstance(cindex, str): # if only a single channel name is given.
cidx = self.col_name.get(cindex)
cn_new = [cindex]
else: # if nothing is given, e.g. index = (ridx,) on calling a[:]
cidx = slice(None, None, None)
cn_new = self.cn
# apply ridx and cidx and return result
sig = self.sig[ridx, cidx] if self.channels > 1 else self.sig[ridx]
# Squeezing shouldn't be performed here.
# this is because: a[:10, 0] and a[:10,[True, False]] return
# (10,) and (10, 1) respectively. Which should be dealt with individually.
if sig.ndim == 2 and sig.shape[1] == 1:
if not (isinstance(cindex, list) and isinstance(cindex[0], bool)): # hot fix to stay consistent with bool slicing
_LOGGER.debug("ndim is 2 and channel num is 1, perform np.squeeze")
sig = np.squeeze(sig)
if isinstance(sig, numbers.Number):
_LOGGER.info("signal is scalar, convert to array")
sig = np.array(sig)
a = Asig(sig, sr=sr, label=self.label + '_arrayindexed', cn=cn_new)
a.mix_mode = self.mix_mode
return a
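# Hedged usage sketch (added; not part of pya.py): illustrates the slicing
# forms documented in Asig.__getitem__ above. The signal, label and channel
# names below are made up for the example.
if __name__ == "__main__":
    demo_sig = np.zeros((44100 * 2, 2), dtype="float32")  # 2 s of stereo silence
    a = Asig(demo_sig, sr=44100, label="demo", cn=["left", "right"])
    mono = a[:, "left"]            # channel-name slicing -> mono Asig
    first_sec = a[{0: 1.0}, :]     # time slicing: 0 s to 1 s
    half_rate = a[::2]             # step slicing also halves the sampling rate (sr=22050)
    masked = a[:, [True, False]]   # boolean channel mask keeps the 2D shape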
# Copyright (c) 2018-2021 <NAME>
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable=protected-access,pointless-string-statement
from __future__ import absolute_import, division, print_function
import os
import os.path as osp
import gdown
from collections import OrderedDict
from copy import deepcopy
import torch
from torch import nn
from .tools import mkdir_if_missing, check_isfile
__all__ = [
'save_checkpoint', 'load_checkpoint', 'resume_from_checkpoint',
'open_all_layers', 'open_specified_layers',
'load_pretrained_weights', 'ModelEmaV2'
]
def params_to_device(param, device):
def tensor_to_device(param, device):
param.data = param.data.to(device)
if param._grad is not None:
param._grad.data = param._grad.data.to(device)
if isinstance(param, torch.Tensor):
tensor_to_device(param, device)
elif isinstance(param, dict):
for subparam in param.values():
tensor_to_device(subparam, device)
def optimizer_to(optim, device):
for param in optim.state.values():
params_to_device(param, device)
def scheduler_to(sched, device):
for param in sched.__dict__.values():
params_to_device(param, device)
def save_checkpoint(
state, save_dir, is_best=False, remove_module_from_keys=False, name='model'
):
r"""Saves checkpoint.
Args:
state (dict): dictionary.
save_dir (str): directory to save checkpoint.
is_best (bool, optional): if True, this checkpoint will be copied and named
``model-best.pth.tar``. Default is False.
remove_module_from_keys (bool, optional): whether to remove "module."
from layer names. Default is False.
Examples::
>>> state = {
>>> 'state_dict': model.state_dict(),
>>> 'epoch': 10,
>>> 'rank1': 0.5,
>>> 'optimizer': optimizer.state_dict()
>>> }
>>> save_checkpoint(state, 'log/my_model')
"""
mkdir_if_missing(save_dir)
if remove_module_from_keys:
# remove 'module.' in state_dict's keys
state_dict = state['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:]
new_state_dict[k] = v
state['state_dict'] = new_state_dict
# save
epoch = state['epoch']
fpath = osp.join(save_dir, f'{name}.pth.tar-' + str(epoch))
torch.save(state, fpath)
print(f'Checkpoint saved to "{fpath}"')
if is_best:
best_link_path = osp.join(osp.dirname(fpath), f'{name}-best.pth.tar')
if osp.lexists(best_link_path):
os.remove(best_link_path)
basename_fpath = osp.basename(fpath)
print(f'Creating best link {basename_fpath} -> {best_link_path}')
os.symlink(basename_fpath, best_link_path)
return fpath
def download_weights(url, chkpt_name='model_weights'):
""" Download model weights from given url """
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
os.makedirs(model_dir, exist_ok=True)
filename = chkpt_name + '.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
try:
gdown.download(url, cached_file)
except Exception as e:
print("ERROR:: error occurred while \
downloading file")
raise e
try:
torch.load(cached_file)
except Exception as e:
print("ERROR:: error occurred while opening \
file with model weights")
raise e
else:
print("SUCCESS:: Model`s weights download completed successfully\n")
return cached_file
def load_checkpoint(fpath, map_location=''):
r"""Loads checkpoint.
``UnicodeDecodeError`` can be well handled, which means
python2-saved files can be read from python3.
Args:
fpath (str): path to checkpoint.
Returns:
dict
Examples::
>>> from torchreid.utils import load_checkpoint
>>> fpath = 'log/my_model/model.pth.tar-10'
>>> checkpoint = load_checkpoint(fpath)
"""
if fpath is None:
raise ValueError('File path is None')
if not osp.exists(fpath):
raise FileNotFoundError(f'File is not found at "{fpath}"')
if not map_location:
map_location = None if torch.cuda.is_available() else 'cpu'
try:
checkpoint = torch.load(fpath, map_location=map_location)
except UnicodeDecodeError as err:
"""
import pickle # nosec
pickle.load = partial(pickle.load, encoding="latin1")
pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
checkpoint = torch.load(
fpath, pickle_module=pickle, map_location=map_location
)
"""
raise RuntimeError('Using pickle reloader could cause vulnerability, so it is blocked') from err
except Exception:
print(f'Unable to load checkpoint from "{fpath}"')
raise
return checkpoint
def resume_from_checkpoint(fpath, model, optimizer=None, scheduler=None, device='cpu'):
r"""Resumes training from a checkpoint.
This will load (1) model weights and (2) ``state_dict``
of optimizer if ``optimizer`` is not None.
Args:
fpath (str): path to checkpoint.
model (nn.Module): model.
optimizer (Optimizer, optional): an Optimizer.
scheduler (LRScheduler, optional): an LRScheduler.
Returns:
int: start_epoch.
Examples::
>>> from torchreid.utils import resume_from_checkpoint
>>> fpath = 'log/my_model/model.pth.tar-10'
>>> start_epoch = resume_from_checkpoint(
>>> fpath, model, optimizer, scheduler
>>> )
"""
is_file = check_isfile(fpath)
if not is_file:
# Otherwise a link (or something else) was given; it will be checked
# and processed by the download function
chkpt_name = model.__class__.__name__ + "_resume"
fpath = download_weights(fpath, chkpt_name=chkpt_name)
print(f'Loading checkpoint from "{fpath}"')
checkpoint = load_checkpoint(fpath)
if 'state_dict' in checkpoint:
load_pretrained_weights(model, pretrained_dict=checkpoint['state_dict'])
else:
load_pretrained_weights(model, pretrained_dict=checkpoint)
print('Loaded model weights')
if optimizer is not None and 'optimizer' in checkpoint.keys():
optimizer.load_state_dict(checkpoint['optimizer'])
optimizer_to(optimizer, device)
print('Loaded optimizer')
if scheduler is not None and 'scheduler' in checkpoint.keys():
scheduler.load_state_dict(checkpoint['scheduler'])
scheduler_to(scheduler, device)
print('Loaded scheduler')
if 'epoch' in checkpoint:
start_epoch = checkpoint['epoch']
else:
start_epoch = 0
print(f'Last epoch = {start_epoch}')
if 'rank1' in checkpoint.keys():
print(f"Last rank1 = {checkpoint['rank1']:.1%}")
return start_epoch
def set_bn_to_eval(m):
r"""Sets BatchNorm layers to eval mode."""
# 1. no update for running mean and var
# 2. scale and shift parameters are still trainable
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
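# Hedged usage note (added): set_bn_to_eval is meant to be applied recursively
# via nn.Module.apply, e.g. model.apply(set_bn_to_eval), which freezes the
# running statistics of every BatchNorm layer while keeping its affine
# parameters trainable.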
def open_all_layers(model):
r"""Opens all layers in model for training.
Examples::
>>> from torchreid.utils import open_all_layers
>>> open_all_layers(model)
"""
model.train()
for p in model.parameters():
if p.dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, torch.bool):
# only Tensors of floating point dtype can require gradients
continue
p.requires_grad = True
def open_specified_layers(model, open_layers, strict=True):
r"""Opens specified layers in model for training while keeping
other layers frozen.
Args:
model (nn.Module): neural net model.
open_layers (str or list): layers open for training.
Examples::
>>> from torchreid.utils import open_specified_layers
>>> # Only model.classifier will be updated.
>>> open_layers = 'classifier'
>>> open_specified_layers(model, open_layers)
>>> # Only model.fc and model.classifier will be updated.
>>> open_layers = ['fc', 'classifier']
>>> open_specified_layers(model, open_layers)
"""
if isinstance(model, nn.DataParallel):
model = model.module
if isinstance(open_layers, str):
open_layers = [open_layers]
if strict:
for layer in open_layers:
if not hasattr(model, layer):
raise ValueError(f'"{layer}" is not an attribute of the model, please provide the correct name')
for name, module in model.named_children():
if name in open_layers:
module.train()
for p in module.parameters():
p.requires_grad = True
else:
module.eval()
for p in module.parameters():
p.requires_grad = False
def _print_loading_weights_inconsistencies(discarded_layers, unmatched_layers):
if discarded_layers:
print(
'** The following layers are discarded '
f'due to unmatched keys or layer size: {discarded_layers}'
)
if unmatched_layers:
print(f'** The following layers were not loaded from checkpoint: {unmatched_layers}')
def load_pretrained_weights(model, file_path='', chkpt_name='model_weights', pretrained_dict=None):
r"""Loads pretrianed weights to model.
Features::
- Incompatible layers (unmatched in name or size) will be ignored.
- Can automatically deal with keys containing "module." and other prefixes.
- Can download weights from link
- Can use pretrained dict directly instead of file path/link if given
Args:
model (nn.Module): network model.
file_path (str): path or link to pretrained weights.
pretrained_dict (dict, optional): a state dict to load directly, instead of a file path/link.
Examples::
>>> from torchreid.utils import load_pretrained_weights
>>> file_path = 'log/my_model/model-best.pth.tar'
>>> load_pretrained_weights(model, file_path)
"""
def _remove_prefix(key, prefix):
prefix = prefix + '.'
if key.startswith(prefix):
key = key[len(prefix):]
return key
is_file = check_isfile(file_path)
if not is_file and not pretrained_dict:
# Otherwise a link (or something else) was given; it will be checked
# and processed by the download function
chkpt_name = osp.split(file_path)[1]
file_path = download_weights(file_path, chkpt_name=chkpt_name)
checkpoint = (load_checkpoint(file_path)
if not pretrained_dict
else pretrained_dict)
if 'classes_map' in checkpoint:
model.classification_classes = checkpoint['classes_map']
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
# discard known prefixes: 'nncf_module.' from NNCF, 'module.' from DataParallel
k = _remove_prefix(k, 'nncf_module')
k = _remove_prefix(k, 'module')
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
message = file_path if file_path else "pretrained dict"
unmatched_layers = sorted(set(model_dict.keys()) - set(new_state_dict))
if len(matched_layers) == 0:
print(
f'The pretrained weights "{message}" cannot be loaded, '
'please check the key names manually'
)
_print_loading_weights_inconsistencies(discarded_layers, unmatched_layers)
raise RuntimeError(f'The pretrained weights {message} cannot be loaded')
print(f'Successfully loaded pretrained weights from "{message}"')
_print_loading_weights_inconsistencies(discarded_layers, unmatched_layers)
# Is based on
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/utils/model_ema.py
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'.
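# Hedged sketch (added; not from the original file): the ModelEmaV2 class body
# is truncated above. Based on the timm implementation referenced in its
# docstring, the core update step looks roughly like the following; the names
# `ema_model` and `decay` are assumptions, not part of the original source.
@torch.no_grad()
def _ema_update_sketch(ema_model, model, decay=0.9999):
    # Iterate both state_dicts in order (V2 does not match params by name)
    # and blend: ema_param <- decay * ema_param + (1 - decay) * model_param
    for ema_v, model_v in zip(ema_model.state_dict().values(),
                              model.state_dict().values()):
        if ema_v.dtype.is_floating_point:
            ema_v.copy_(ema_v * decay + (1.0 - decay) * model_v)
        else:
            ema_v.copy_(model_v)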
be fit as if it was new, as well as deep-copied."""
model = self.model
self.model = None
template = copy.deepcopy(self)
template.reset_metrics()
self.model = model
return template
def convert_to_refit_full_template(self):
"""After calling this function, returned model should be able to be fit without X_val, y_val using the iterations trained by the original model."""
params_trained = self.params_trained.copy()
template = self.convert_to_template()
template.params.update(params_trained)
template.name = template.name + REFIT_FULL_SUFFIX
template.set_contexts(self.path_root + template.name + os.path.sep)
return template
def _get_init_args(self):
hyperparameters = self.params.copy()
hyperparameters = {key: val for key, val in hyperparameters.items() if key in self.nondefault_params}
init_args = dict(
path=self.path_root,
name=self.name,
problem_type=self.problem_type,
eval_metric=self.eval_metric,
hyperparameters=hyperparameters,
quantile_levels=self.quantile_levels,
stopping_metric=self.stopping_metric
)
return init_args
def hyperparameter_tune(self, scheduler_options, time_limit=None, **kwargs):
scheduler_options = copy.deepcopy(scheduler_options)
if 'time_out' not in scheduler_options[1]:
scheduler_options[1]['time_out'] = time_limit
kwargs = self.initialize(time_limit=scheduler_options[1]['time_out'], **kwargs)
resource = copy.deepcopy(scheduler_options[1]['resource'])
if 'num_cpus' in resource:
if resource['num_cpus'] == 'auto':
resource.pop('num_cpus')
if 'num_gpus' in resource:
if resource['num_gpus'] == 'auto':
resource.pop('num_gpus')
scheduler_options[1]['resource'] = self._preprocess_fit_resources(silent=True, **resource)
return self._hyperparameter_tune(scheduler_options=scheduler_options, **kwargs)
def _hyperparameter_tune(self, X, y, X_val, y_val, scheduler_options, **kwargs):
"""
Hyperparameter tune the model.
This usually does not need to be overwritten by models.
"""
# verbosity = kwargs.get('verbosity', 2)
time_start = time.time()
logger.log(15, "Starting generic AbstractModel hyperparameter tuning for %s model..." % self.name)
self._set_default_searchspace()
params_copy = self._get_params()
directory = self.path # also create model directory if it doesn't exist
# TODO: This will break on S3. Use tabular/utils/savers for datasets, add new function
scheduler_cls, scheduler_params = scheduler_options # Unpack tuple
if scheduler_cls is None or scheduler_params is None:
raise ValueError("scheduler_cls and scheduler_params cannot be None for hyperparameter tuning")
dataset_train_filename = 'dataset_train.p'
train_path = directory + dataset_train_filename
save_pkl.save(path=train_path, object=(X, y))
dataset_val_filename = 'dataset_val.p'
val_path = directory + dataset_val_filename
save_pkl.save(path=val_path, object=(X_val, y_val))
if not any(isinstance(params_copy[hyperparam], Space) for hyperparam in params_copy):
logger.warning("Attempting to do hyperparameter optimization without any search space (all hyperparameters are already fixed values)")
else:
logger.log(15, "Hyperparameter search space for %s model: " % self.name)
for hyperparam in params_copy:
if isinstance(params_copy[hyperparam], Space):
logger.log(15, f"{hyperparam}: {params_copy[hyperparam]}")
fit_kwargs = scheduler_params['resource'].copy()
fit_kwargs['sample_weight'] = kwargs.get('sample_weight', None)
fit_kwargs['sample_weight_val'] = kwargs.get('sample_weight_val', None)
util_args = dict(
dataset_train_filename=dataset_train_filename,
dataset_val_filename=dataset_val_filename,
directory=directory,
model=self,
time_start=time_start,
time_limit=scheduler_params['time_out'],
fit_kwargs=fit_kwargs,
)
model_trial.register_args(util_args=util_args, **params_copy)
scheduler: FIFOScheduler = scheduler_cls(model_trial, **scheduler_params)
if ('dist_ip_addrs' in scheduler_params) and (len(scheduler_params['dist_ip_addrs']) > 0):
# This is multi-machine setting, so need to copy dataset to workers:
logger.log(15, "Uploading data to remote workers...")
scheduler.upload_files([train_path, val_path]) # TODO: currently does not work.
directory = self.path # TODO: need to change to path to working directory used on every remote machine
model_trial.update(directory=directory)
logger.log(15, "uploaded")
scheduler.run()
scheduler.join_jobs()
return self._get_hpo_results(scheduler=scheduler, scheduler_params=scheduler_params, time_start=time_start)
def _get_hpo_results(self, scheduler, scheduler_params: dict, time_start):
# Store results / models from this HPO run:
best_hp = scheduler.get_best_config() # best_hp only contains searchable stuff
hpo_results = {
'best_reward': scheduler.get_best_reward(),
'best_config': best_hp,
'total_time': time.time() - time_start,
'metadata': scheduler.metadata,
'training_history': scheduler.training_history,
'config_history': scheduler.config_history,
'reward_attr': scheduler._reward_attr,
'args': model_trial.args
}
hpo_results = BasePredictor._format_results(hpo_results) # results summarizing HPO for this model
if ('dist_ip_addrs' in scheduler_params) and (len(scheduler_params['dist_ip_addrs']) > 0):
raise NotImplementedError("need to fetch model files from remote Workers")
# TODO: need to handle locations carefully: fetch these files and put them into self.path directory:
# 1) hpo_results['trial_info'][trial]['metadata']['trial_model_file']
hpo_models = {} # stores all the model names and file paths to model objects created during this HPO run.
hpo_model_performances = {}
for trial in sorted(hpo_results['trial_info'].keys()):
# TODO: ignore models which were killed early by scheduler (eg. in Hyperband). How to ID these?
file_id = f"T{trial}" # unique identifier to files from this trial
trial_model_name = self.name + os.path.sep + file_id
trial_model_path = self.path_root + trial_model_name + os.path.sep
hpo_models[trial_model_name] = trial_model_path
hpo_model_performances[trial_model_name] = hpo_results['trial_info'][trial][scheduler._reward_attr]
logger.log(15, "Time for %s model HPO: %s" % (self.name, str(hpo_results['total_time'])))
logger.log(15, "Best hyperparameter configuration for %s model: " % self.name)
logger.log(15, str(best_hp))
return hpo_models, hpo_model_performances, hpo_results
# Resets metrics for the model
def reset_metrics(self):
self.fit_time = None
self.predict_time = None
self.val_score = None
self.params_trained = dict()
# TODO: Experimental, currently unused
# Has not been tested on Windows
# Does not work if model is located in S3
# Does not work if called before model was saved to disk (Will output 0)
def get_disk_size(self) -> int:
# Taken from https://stackoverflow.com/a/1392549
from pathlib import Path
model_path = Path(self.path)
model_disk_size = sum(f.stat().st_size for f in model_path.glob('**/*') if f.is_file())
return model_disk_size
# TODO: This results in a doubling of memory usage of the model to calculate its size.
# If the model takes ~40%+ of memory, this may result in an OOM error.
# This is generally not an issue because the model already needed to do this when being saved to disk, so the error would have been triggered earlier.
# Consider using Pympler package for memory efficiency: https://pympler.readthedocs.io/en/latest/asizeof.html#asizeof
def get_memory_size(self) -> int:
gc.collect() # Try to avoid OOM error
return sys.getsizeof(pickle.dumps(self, protocol=4))
def reduce_memory_size(self, remove_fit=True, remove_info=False, requires_save=True, **kwargs):
"""
Removes non-essential objects from the model to reduce memory and disk footprint.
If `remove_fit=True`, enables the removal of variables which are required for fitting the model. If the model is already fully trained, then it is safe to remove these.
If `remove_info=True`, enables the removal of variables which are used during model.get_info(). The values will be None when calling model.get_info().
If `requires_save=True`, enables the removal of variables which are part of the model.pkl object, requiring an overwrite of the model to disk if it was previously persisted.
It is not necessary for models to implement this.
"""
pass
def delete_from_disk(self):
"""
Deletes the model from disk.
WARNING: This will DELETE ALL FILES in the self.path directory, regardless if they were created by AutoGluon or not.
DO NOT STORE FILES INSIDE OF THE MODEL DIRECTORY THAT ARE UNRELATED TO AUTOGLUON.
"""
logger.log(30, f'Deleting model {self.name}. All files under {self.path} will be removed.')
from pathlib import Path
import shutil
model_path = Path(self.path)
# TODO: Report errors?
shutil.rmtree(path=model_path, ignore_errors=True)
def get_info(self) -> dict:
"""
Returns a dictionary of numerous fields describing the model.
"""
info = {
'name': self.name,
'model_type': type(self).__name__,
'problem_type': self.problem_type,
'eval_metric': self.eval_metric.name,
'stopping_metric': self.stopping_metric.name,
'fit_time': self.fit_time,
'num_classes': self.num_classes,
'quantile_levels': self.quantile_levels,
'predict_time': self.predict_time,
'val_score': self.val_score,
'hyperparameters': self.params,
'hyperparameters_fit': self.params_trained, # TODO: Explain in docs that this is for hyperparameters that differ in final model from original hyperparameters, such as epochs (from early stopping)
'hyperparameters_nondefault': self.nondefault_params,
AG_ARGS_FIT: self.params_aux,
'num_features': len(self.features) if self.features else None,
'features': self.features,
'feature_metadata': self.feature_metadata,
# 'disk_size': self.get_disk_size(),
'memory_size': self.get_memory_size(), # Memory usage of model in bytes
}
return info
@classmethod
def load_info(cls, path, load_model_if_required=True) -> dict:
load_path = path + cls.model_info_name
try:
return load_pkl.load(path=load_path)
except:
if load_model_if_required:
model = cls.load(path=path, reset_paths=True)
return model.get_info()
else:
raise
def save_info(self) -> dict:
info = self.get_info()
save_pkl.save(path=self.path + self.model_info_name, object=info)
json_path = self.path + self.model_info_json_name
save_json.save(path=json_path, obj=info)
return info
def _get_default_resources(self):
"""
Determines the default resource usage of the model during fit.
Models may want to override this if they depend heavily on GPUs, as the default sets num_gpus to 0.
"""
num_cpus = get_cpu_count()
num_gpus = 0
return num_cpus, num_gpus
# TODO: v0.1 Add reference link to all valid keys and their usage or keep full docs here and reference elsewhere?
@classmethod
def _get_default_ag_args(cls) -> dict:
"""
Dictionary of customization options related to meta properties of the model such as its name, the order it is trained, and the problem types it is valid for.
"""
return {}
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
"""
[Advanced] Dictionary of customization options related to meta
frequent organism
most_frequent = max(self.organism_frequency.keys(), key=(lambda k: self.organism_frequency[k])) # noqa
if most_frequent in organisms_to_match:
entity_id, organism_id, closest_distance = self._get_closest_entity_organism_pair( # noqa
entity=token,
organism_matches={most_frequent: organisms_to_match[most_frequent]}
)
return BestOrganismMatch(
entity_id=entity_id,
organism_id=organism_id,
closest_distance=closest_distance,
specified_organism_id=specified_organism_id)
def _annotate_type_gene(
self,
recognized_entities: RecognizedEntities
) -> List[Annotation]:
"""Gene specific annotation. Nearly identical to `_get_annotation`,
except that we check genes against the matched organisms found in the
document.
It is likely that the annotator will detect keywords that resemble gene
names, but are not genes in the context of the document.
It is also possible that two organisms discussed in the document each have a
gene with the same name. In this case we need a way to distinguish between the
two.
To resolve both of the above issues we check the graph database for
relationships between genes/organisms, and handle each of the following cases:
1. Exactly one organism match for a given gene
2. More than one organism match for a given gene
3. No organism matches for a given gene
Returns list of matched annotations
"""
matches_list: List[LMDBMatch] = recognized_entities.recognized_genes
entities_to_create: List[CreateAnnotationObjParams] = []
entity_token_pairs = []
gene_names: Set[str] = set()
for match in matches_list:
entities_set = set()
for entity in match.entities:
gene_names.add(entity['synonym'])
entities_set.add((entity['synonym'], entity['id_type'], entity.get('hyperlinks', ''))) # noqa
for synonym, datasource, hyperlinks in entities_set:
if hyperlinks == '':
hyperlinks = []
entity_token_pairs.append((synonym, datasource, hyperlinks, match.token))
gene_names_list = list(gene_names)
organism_ids = list(self.organism_frequency)
gene_match_time = time.time()
graph_results = self.graph.get_genes_to_organisms(
genes=gene_names_list,
organisms=organism_ids,
)
current_app.logger.info(
f'Gene organism KG query time {time.time() - gene_match_time}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
gene_organism_matches = graph_results.matches
gene_data_sources = graph_results.data_sources
gene_primary_names = graph_results.primary_names
# any genes not matched in KG fall back to specified organism
fallback_gene_organism_matches = {}
if self.specified_organism.synonym:
gene_match_time = time.time()
fallback_graph_results = \
self.graph.get_genes_to_organisms(
genes=gene_names_list,
organisms=[self.specified_organism.organism_id],
)
current_app.logger.info(
f'Gene fallback organism KG query time {time.time() - gene_match_time}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
fallback_gene_organism_matches = fallback_graph_results.matches
gene_data_sources.update(fallback_graph_results.data_sources)
gene_primary_names.update(fallback_graph_results.primary_names)
for entity_synonym, entity_datasource, entity_hyperlinks, token in entity_token_pairs:
gene_id = None
category = None
organisms_to_match: Dict[str, str] = {}
if entity_synonym in gene_organism_matches:
try:
# prioritize common name that is same as synonym
organisms_to_match = gene_organism_matches[entity_synonym][entity_synonym]
except KeyError:
# an organism can have multiple different genes w/ same synonym
# since we don't know which to use, doing this is fine
for d in list(gene_organism_matches[entity_synonym].values()):
organisms_to_match = {**organisms_to_match, **d}
best_match = self._find_best_organism_match(
token=token,
entity_synonym=entity_synonym,
organisms_to_match=organisms_to_match,
fallback_organism_matches=fallback_gene_organism_matches,
entity_type=EntityType.GENE.value)
if isinf(best_match.closest_distance):
# didn't find a suitable organism in organisms_to_match
continue
gene_id = best_match.entity_id
organism_id = best_match.organism_id
specified_organism_id = best_match.specified_organism_id
category = self.specified_organism.category if specified_organism_id else self.organism_categories[organism_id] # noqa
elif entity_synonym in fallback_gene_organism_matches:
organism_id = self.specified_organism.organism_id
try:
# prioritize common name match over synonym
organisms_to_match = fallback_gene_organism_matches[entity_synonym][entity_synonym] # noqa
except KeyError:
# an organism can have multiple different genes w/ same synonym
# since we don't know which to use, doing this is fine
for d in list(fallback_gene_organism_matches[entity_synonym].values()):
organisms_to_match = {**organisms_to_match, **d}
try:
gene_id = organisms_to_match[self.specified_organism.organism_id] # noqa
category = self.specified_organism.category
except KeyError:
continue
if gene_id and category:
if entity_datasource != gene_data_sources[f'{entity_synonym}{organism_id}']:
continue
entities_to_create.append(
CreateAnnotationObjParams(
token=token,
token_type=EntityType.GENE.value,
entity_synonym=entity_synonym,
entity_name=gene_primary_names[gene_id],
entity_id=gene_id,
entity_datasource=entity_datasource,
entity_hyperlinks=entity_hyperlinks,
entity_category=category
)
)
return self._create_annotation_object(entities_to_create)
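# Illustrative walk-through (added; an assumption, not from the original
# source): if the document mentions Homo sapiens (tax id 9606) and Mus musculus
# (tax id 10090), and the token "ACE2" maps to {'9606': <human gene id>,
# '10090': <mouse gene id>} in the knowledge graph, case 2 above applies and
# _find_best_organism_match picks the gene whose organism mention lies closest
# to the token in the text; with a single entry the choice is immediate
# (case 1), and with no entries the specified fallback organism is tried
# (case 3).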
def _annotate_type_protein(
self,
recognized_entities: RecognizedEntities
) -> List[Annotation]:
"""Nearly identical to `self._annotate_type_gene`. Return a list of
protein annotations with the correct protein_id. If the protein
was not matched in the knowledge graph, then keep the original
protein_id.
"""
matches_list: List[LMDBMatch] = recognized_entities.recognized_proteins
entities_to_create: List[CreateAnnotationObjParams] = []
entity_token_pairs = []
protein_names: Set[str] = set()
for match in matches_list:
entities_set = set()
for entity in match.entities:
protein_names.add(entity['synonym'])
entities_set.add((entity['synonym'], entity.get('category', ''), entity['id_type'], entity.get('hyperlinks', ''))) # noqa
for synonym, category, datasource, hyperlinks in entities_set:
if hyperlinks == '':
hyperlinks = []
entity_token_pairs.append((synonym, category, datasource, hyperlinks, match.token))
protein_names_list = list(protein_names)
protein_match_time = time.time()
graph_results = self.graph.get_proteins_to_organisms(
proteins=protein_names_list,
organisms=list(self.organism_frequency),
)
current_app.logger.info(
f'Protein organism KG query time {time.time() - protein_match_time}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
protein_organism_matches = graph_results.matches
protein_primary_names = graph_results.primary_names
# any proteins not matched in KG fall back to specified organism
fallback_protein_organism_matches = {}
if self.specified_organism.synonym:
protein_match_time = time.time()
fallback_graph_results = \
self.graph.get_proteins_to_organisms(
proteins=protein_names_list,
organisms=[self.specified_organism.organism_id],
)
current_app.logger.info(
f'Protein fallback organism KG query time {time.time() - protein_match_time}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
fallback_protein_organism_matches = fallback_graph_results.matches
protein_primary_names.update(fallback_graph_results.primary_names)
for entity_synonym, category, entity_datasource, entity_hyperlinks, token in entity_token_pairs: # noqa
# in LMDB we use the synonym as id and name, so do the same here
protein_id = entity_synonym
if entity_synonym in protein_organism_matches:
best_match = self._find_best_organism_match(
token=token,
entity_synonym=entity_synonym,
organisms_to_match=protein_organism_matches[entity_synonym],
fallback_organism_matches=fallback_protein_organism_matches,
entity_type=EntityType.PROTEIN.value)
if isinf(best_match.closest_distance):
# didn't find a suitable organism in organisms_to_match
continue
protein_id = best_match.entity_id
organism_id = best_match.organism_id
specified_organism_id = best_match.specified_organism_id
category = self.specified_organism.category if specified_organism_id else self.organism_categories[organism_id] # noqa
elif entity_synonym in fallback_protein_organism_matches:
try:
protein_id = fallback_protein_organism_matches[entity_synonym][self.specified_organism.organism_id] # noqa
category = self.specified_organism.category
except KeyError:
continue
entities_to_create.append(
CreateAnnotationObjParams(
token=token,
token_type=EntityType.PROTEIN.value,
entity_synonym=entity_synonym,
entity_name=protein_primary_names.get(protein_id, entity_synonym),
entity_id=protein_id,
entity_datasource=entity_datasource,
entity_hyperlinks=entity_hyperlinks,
entity_category=category
)
)
return self._create_annotation_object(entities_to_create)
def _annotate_local_species(
self,
recognized_entities: RecognizedEntities
) -> List[Annotation]:
"""Similar to self._get_annotation() but for creating
annotations of custom species.
However, does not check if a synonym is used by multiple
common names that all appear in the document, as assume
user wants these custom species annotations to be
annotated.
"""
matches = recognized_entities.recognized_local_species
entities_to_create: List[CreateAnnotationObjParams] = []
for match in matches:
for entity in match.entities:
try:
entities_to_create.append(
CreateAnnotationObjParams(
token=match.token,
token_type=EntityType.SPECIES.value,
entity_name=entity['name'],
entity_synonym=entity['synonym'],
entity_id=entity[EntityIdStr.SPECIES.value],
entity_datasource=entity['id_type'],
entity_hyperlinks=entity.get('id_hyperlinks', []),
entity_category=entity.get('category', '')
)
)
except KeyError:
continue
return self._create_annotation_object(entities_to_create)
def _annotate_type_species(
self,
entity_id_str: str,
custom_annotations: List[dict],
recognized_entities: RecognizedEntities
) -> List[Annotation]:
species_annotations = self._get_annotation(
matches_list=recognized_entities.recognized_species,
token_type=EntityType.SPECIES.value,
id_str=entity_id_str
)
local_species_annotations = self._annotate_local_species(recognized_entities)
# TODO: think about this
# if a user creates a local inclusion for a species,
# even if they chose to only annotate one occurrence of that word,
# should all other occurrences be considered when annotating?
# or could the same situation with gene/protein occur?
# e.g. different casings can mean different things
local_inclusions = [
custom for custom in custom_annotations if custom.get(
'meta', {}).get('type') == EntityType.SPECIES.value and not custom.get(
'meta', {}).get('includeGlobally')]
# we only want the annotations with correct coordinates
# because it is possible for a word to only have one
# of its occurrences included as a custom annotation
filtered_local_species_annotations: List[Annotation] = []
for custom in local_inclusions:
for custom_anno in local_species_annotations:
if custom.get('rects') and len(custom['rects']) == len(custom_anno.rects):
# check if center point for each rect in custom_anno.rects
# is in the corresponding rectangle from custom annotations
valid = all(list(map(has_center_point, custom['rects'], custom_anno.rects)))
# if center point is in custom annotation rectangle
# then add it to list
if valid:
filtered_local_species_annotations.append(custom_anno)
# clean species annotations first
# because genes depend on them
species_annotations = self._clean_annotations(annotations=species_annotations)
species_annotations_with_local = [anno for anno in species_annotations]
if local_inclusions:
species_annotations_with_local += filtered_local_species_annotations
self.organism_frequency, self.organism_locations, self.organism_categories = \
self._get_entity_frequency_location_and_category(species_annotations_with_local)
return species_annotations
def _get_entity_frequency_location_and_category(
self,
annotations
) -> Tuple[
Dict[str, int],
Dict[str, List[Tuple[int, int]]],
Dict[str, str]]:
"""Takes as input a list of annotation objects (intended to be of a single entity type).
Returns the frequency of the annotation entities, and their locations within the document.
"""
matched_entity_locations: Dict[str, List[Tuple[int, int]]] = {}
entity_frequency: Dict[str, int] = {}
entity_categories: Dict[str, str] = {}
locations: Dict[str, Set[Tuple[int, int]]] = {}
for annotation in annotations:
entity_id = annotation.meta.id
offset_pairs = (annotation.lo_location_offset, annotation.hi_location_offset)
if entity_frequency.get(entity_id, None):
entity_frequency[entity_id] += 1
else:
entity_frequency[entity_id] = 1
if locations.get(entity_id, None):
locations[entity_id].add(offset_pairs)
else:
locations[entity_id] = {offset_pairs}
# Need to add an entry for humans if we annotated a virus
if annotation.meta.category == OrganismCategory.VIRUSES.value:
if locations.get(HOMO_SAPIENS_TAX_ID, None):
locations[HOMO_SAPIENS_TAX_ID].add(offset_pairs)
else:
locations[HOMO_SAPIENS_TAX_ID] = {offset_pairs}
if entity_frequency.get(HOMO_SAPIENS_TAX_ID, None):
entity_frequency[HOMO_SAPIENS_TAX_ID] += 1
else:
entity_frequency[HOMO_SAPIENS_TAX_ID] = 1
entity_categories[HOMO_SAPIENS_TAX_ID] = OrganismCategory.EUKARYOTA.value
entity_categories[entity_id] = annotation.meta.category or ''
for k, v in locations.items():
matched_entity_locations[k] = sorted(v)
return entity_frequency, matched_entity_locations, entity_categories
def _get_fixed_false_positive_unified_annotations(
self,
annotations_list: List[Annotation]
) -> List[Annotation]:
"""Removes any false positive annotations.
False positives occurred during our matching
because we normalize the text from the PDF and
the keys in LMDB.
False positives are multi-length words that
got matched to a shorter-length word due to
normalizing in LMDB, or words that got matched
without the casing being taken into account, e.g.
the gene 'marA' is correct, but 'mara'
parts
# $ argv "${a[@]}${undef[@]:-${c[@]}}"
# ['1', '24', '5']
#log('DQ part %s', part)
# Special case for "". The parser outputs (DoubleQuoted []), instead
# of (DoubleQuoted [Literal '']). This is better but it means we
# have to check for it.
if len(parts) == 0:
v = part_value.String('', True, False)
part_vals.append(v)
return
for p in parts:
self._EvalWordPart(p, part_vals, quoted=True)
def EvalDoubleQuotedToString(self, dq_part):
# type: (double_quoted) -> str
"""For double quoted strings in Oil expressions.
Example: var x = "$foo-${foo}"
"""
part_vals = [] # type: List[part_value_t]
self._EvalDoubleQuoted(dq_part.parts, part_vals)
return self._PartValsToString(part_vals, dq_part.left.span_id)
def _DecayArray(self, val):
# type: (value__MaybeStrArray) -> value__Str
"""Decay $* to a string."""
assert val.tag == value_e.MaybeStrArray, val
sep = self.splitter.GetJoinChar()
tmp = [s for s in val.strs if s is not None]
return value.Str(sep.join(tmp))
def _EmptyStrOrError(self, val, token=None):
# type: (value_t, Optional[Token]) -> value_t
if val.tag_() == value_e.Undef:
if self.exec_opts.nounset():
if token is None:
e_die('Undefined variable')
else:
name = token.val[1:] if token.val.startswith('$') else token.val
e_die('Undefined variable %r', name, token=token)
else:
return value.Str('')
else:
return val
def _EmptyMaybeStrArrayOrError(self, token):
# type: (Token) -> value_t
assert token is not None
if self.exec_opts.nounset():
e_die('Undefined array %r', token.val, token=token)
else:
return value.MaybeStrArray([])
def _EvalBracedVarSub(self, part, part_vals, quoted):
# type: (braced_var_sub, List[part_value_t], bool) -> None
"""
Args:
part_vals: output param to append to.
"""
# We have four types of operator that interact.
#
# 1. Bracket: value -> (value, bool maybe_decay_array)
#
# 2. Then these four cases are mutually exclusive:
#
# a. Prefix length: value -> value
# b. Test: value -> part_value[]
# c. Other Suffix: value -> value
# d. no operator: you have a value
#
# That is, we don't have both prefix and suffix operators.
#
# 3. Process maybe_decay_array here before returning.
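# Illustrative shell examples for the cases above (sketch, not exhaustive):
#   ${a[0]}       -> bracket op only (then case 2d: no prefix/suffix operator)
#   ${#foo}       -> prefix length op (2a)
#   ${foo:-bar}   -> test op, may append to part_vals directly (2b)
#   ${foo%.txt}   -> other suffix op (2c)
#   "${a[*]}"     -> sets maybe_decay_array; joined with the join char in step 3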
maybe_decay_array = False # for $*, ${a[*]}, etc.
name_query = False
var_name = None # type: str # For ${foo=default}
# 1. Evaluate from (var_name, var_num, token Id) -> value
if part.token.id == Id.VSub_Name:
# Handle ${!prefix@} first, since that looks at names and not values
if (part.prefix_op is not None and
part.suffix_op is not None and
part.suffix_op.tag_() == suffix_op_e.Nullary):
names = self.mem.VarNamesStartingWith(part.token.val)
names.sort()
val = value.MaybeStrArray(names) # type: value_t
suffix_op = cast(Token, part.suffix_op)
# "${!prefix@}" is the only one that doesn't decay
maybe_decay_array = not (quoted and suffix_op.id == Id.VOp3_At)
name_query = True
else:
var_name = part.token.val
# TODO: LINENO can use its own span_id!
val = self.mem.GetVar(var_name)
elif part.token.id == Id.VSub_Number:
var_num = int(part.token.val)
val = self._EvalVarNum(var_num)
else:
# $* decays
val, maybe_decay_array = self._EvalSpecialVar(part.token.id, quoted)
var_index = None # type: a_index_t
# 2. Bracket: value -> (value v, bool maybe_decay_array)
# maybe_decay_array is for joining ${a[*]} and unquoted ${a[@]} AFTER
# suffix ops are applied. If we take the length with a prefix op, the
# distinction is ignored.
if part.bracket_op:
bracket_op = part.bracket_op
UP_bracket_op = bracket_op
with tagswitch(bracket_op) as case:
if case(bracket_op_e.WholeArray):
bracket_op = cast(bracket_op__WholeArray, UP_bracket_op)
op_id = bracket_op.op_id
if op_id == Id.Lit_At:
maybe_decay_array = not quoted # ${a[@]} decays but "${a[@]}" doesn't
UP_val = val
with tagswitch(val) as case2:
if case2(value_e.Undef):
val = self._EmptyMaybeStrArrayOrError(part.token)
elif case2(value_e.Str):
val = cast(value__Str, UP_val)
e_die("Can't index string with @", part=part)
elif case2(value_e.MaybeStrArray):
val = cast(value__MaybeStrArray, UP_val)
# TODO: Is this a no-op? Just leave 'val' alone.
val = value.MaybeStrArray(val.strs)
elif op_id == Id.Arith_Star:
maybe_decay_array = True # both ${a[*]} and "${a[*]}" decay
UP_val = val
with tagswitch(val) as case2:
if case2(value_e.Undef):
val = self._EmptyMaybeStrArrayOrError(part.token)
elif case2(value_e.Str):
val = cast(value__Str, UP_val)
e_die("Can't index string with *", part=part)
elif case2(value_e.MaybeStrArray):
val = cast(value__MaybeStrArray, UP_val)
# TODO: Is this a no-op? Just leave 'val' alone.
# ${a[*]} or "${a[*]}" : maybe_decay_array is always true
val = value.MaybeStrArray(val.strs)
else:
raise AssertionError(op_id) # unknown
elif case(bracket_op_e.ArrayIndex):
bracket_op = cast(bracket_op__ArrayIndex, UP_bracket_op)
anode = bracket_op.expr
UP_val = val
with tagswitch(val) as case2:
if case2(value_e.Undef):
pass # it will be checked later
elif case2(value_e.Str):
# Bash treats any string as an array, so we can't add our own
# behavior here without making valid OSH invalid bash.
e_die("Can't index string %r with integer", part.token.val,
token=part.token)
elif case2(value_e.MaybeStrArray):
array_val = cast(value__MaybeStrArray, UP_val)
index = self.arith_ev.EvalToInt(anode)
var_index = a_index.Int(index)
try:
# could be None because representation is sparse
s = array_val.strs[index]
except IndexError:
s = None
if s is None:
val = value.Undef()
else:
val = value.Str(s)
elif case2(value_e.AssocArray):
assoc_val = cast(value__AssocArray, UP_val)
key = self.arith_ev.EvalWordToString(anode)
var_index = a_index.Str(key)
s = assoc_val.d.get(key)
if s is None:
val = value.Undef()
else:
val = value.Str(s)
else:
raise AssertionError(val.tag_())
else:
raise AssertionError(bracket_op.tag_())
else: # no bracket op
# When the array is "$@", var_name is None
if var_name and val.tag_() in (value_e.MaybeStrArray, value_e.AssocArray):
suffix_op2 = part.suffix_op
if (suffix_op2 and suffix_op2.tag_() == suffix_op_e.Nullary and
cast(Token, suffix_op2).id == Id.VOp0_a):
# ${array@a} is a string
# TODO: An IR for ${} might simplify these lengthy conditions
pass
elif CheckCompatArray(var_name, self.exec_opts, not (part.prefix_op or part.suffix_op)):
# for ${BASH_SOURCE}, etc.
val = ResolveCompatArray(val)
else:
e_die("Array %r can't be referred to as a scalar (without @ or *)",
var_name, part=part)
if part.prefix_op and not name_query:
val = self._EmptyStrOrError(val) # maybe error
# could be
# - ${!ref-default}
# - "${!assoc[@]}" vs. ${!assoc[*]} (TODO: maybe_decay_array for this
# case)
val = self._ApplyPrefixOp(val, part.prefix_op, part.token)
# NOTE: The length operator followed by a suffix operator is a SYNTAX
# error.
if part.suffix_op and not name_query:
op = part.suffix_op
UP_op = op
with tagswitch(op) as case:
if case(suffix_op_e.Nullary):
op = cast(Token, UP_op)
op_id = op.id
if op_id == Id.VOp0_P:
prompt = self.prompt_ev.EvalPrompt(val)
# readline gets rid of these, so we should too.
p = prompt.replace('\x01', '').replace('\x02', '')
val = value.Str(p)
elif op_id == Id.VOp0_Q:
assert val.tag_() == value_e.Str, val
val = cast(value__Str, val)
val = value.Str(qsn.maybe_shell_encode(val.s))
# oddly, 'echo ${x@Q}' is equivalent to 'echo "${x@Q}"' in bash
quoted = True
elif op_id == Id.VOp0_a:
# We're ONLY simulating -a and -A, not -r -x -n for now. See
# spec/ble-idioms.test.sh.
chars = [] # type: List[str]
with tagswitch(val) as case:
if case(value_e.MaybeStrArray):
chars.append('a')
elif case(value_e.AssocArray):
chars.append('A')
if var_name is not None: # e.g. ${?@a} is allowed
cell = self.mem.GetCell(var_name)
if cell.readonly:
chars.append('r')
if cell.exported:
chars.append('x')
if cell.nameref:
chars.append('n')
val = value.Str(''.join(chars))
else:
e_die('Var op %r not implemented', op.val, token=op)
elif case(suffix_op_e.Unary):
op = cast(suffix_op__Unary, UP_op)
if consts.GetKind(op.op_id) == Kind.VTest:
# TODO: Also we need bracket_op to form lvalue here?
# So pass 'part'?
# bracket_op_e.ArrayIndex
# you already evaluated 'key' and 'index' above, so I guess
# you need index_t?
if self._ApplyTestOp(val, op, quoted, part_vals, var_name, var_index, part.token):
# e.g. to evaluate ${undef:-'default'}, we already appended
# what we need
return
else:
val = self._EmptyStrOrError(val) # maybe error
# Other suffix: value -> value
val = self._ApplyUnarySuffixOp(val, op)
elif case(suffix_op_e.PatSub): # PatSub, vectorized
op = cast(suffix_op__PatSub, UP_op)
val = self._EmptyStrOrError(val) # ${undef//x/y}
# globs are supported in the pattern
pat_val = self.EvalWordToString(op.pat, quote_kind=quote_e.FnMatch)
assert pat_val.tag == value_e.Str, pat_val
if op.replace:
replace_val = self.EvalWordToString(op.replace)
assert replace_val.tag == value_e.Str, replace_val
replace_str = replace_val.s
else:
replace_str = ''
regex, warnings = glob_.GlobToERE(pat_val.s)
if len(warnings):
# TODO:
# - Add 'set -o strict-glob' mode and expose warnings.
# "Glob is not in CANONICAL FORM".
# - Propagate location info back to the 'op.pat' word.
pass
replacer = string_ops.GlobReplacer(regex, replace_str, op.spids[0])
with tagswitch(val) as case2:
if case2(value_e.Str):
str_val = cast(value__Str, val)
s = replacer.Replace(str_val.s, op)
val = value.Str(s)
elif case2(value_e.MaybeStrArray):
array_val = cast(value__MaybeStrArray, val)
strs = [] # type: List[str]
for s in array_val.strs:
if s is not None:
strs.append(replacer.Replace(s, op))
val = value.MaybeStrArray(strs)
elif case2(value_e.AssocArray):
assoc_val = cast(value__AssocArray, val)
strs = []
for s in assoc_val.d.values():
strs.append(replacer.Replace(s, op))
val = value.MaybeStrArray(strs)
else:
raise AssertionError(val.tag_())
elif case(suffix_op_e.Slice):
op = cast(suffix_op__Slice, UP_op)
val = self._EmptyStrOrError(val) # ${undef:3:1}
if op.begin:
begin = self.arith_ev.EvalToInt(op.begin)
else:
begin = 0
# Note: bash allows lengths to be negative (with odd
# ----------------------------------------------------------------------
# DynaShp_adj.py
# ----------------------------------------------------------------------
# Author: <NAME>
# Date: 05 February 2020
# Purpose: Script to create stn/msr shapefiles from DynAdjust .adj file
# ----------------------------------------------------------------------
# Usage: cmd:\> python DynaShp_adj.py <*.adj_file>
# ----------------------------------------------------------------------
# Notes: - Requires coordinate types PLHh. Grid coordinates are computed
# if absent.
# - Currently handles msr types BDEGHLMRXY. Other types are ignored.
# - GDA2020 and GDA94 reference frames are supported. Others are
# written as datum-less.
#
# Future work:
# - Include remaining msr types
# - Introduce additional epsg codes for other reference frames
# - Handle variable coordinate formats
# - Introduce functions for shapefile writes (not one per msr type)
# ----------------------------------------------------------------------
# Update: 02 June 2020
# - Script updated to better detect start of measurements/stations
# - Station read works with variable coordinate types.
# Minimum required coord types are PLHh
# ----------------------------------------------------------------------
# Update: 04 August 2020 (<NAME>)
# - Added Station Description output
# ----------------------------------------------------------------------
import geodepy.convert as gc
import os
import shapefile
adj_file = os.sys.argv[1]
# ----------------------------------------------------------------------
# prepare file names
# ----------------------------------------------------------------------
# pyshp truncates shapefile names at the first period, so keep only the part
# of the file name before the first period (the whole name if it has none)
short = adj_file.split('.')[0]
point_name = short + '_stn'
b_msr_name = short + '_b'
d_msr_name = short + '_d'
e_msr_name = short + '_e'
g_msr_name = short + '_g'
h_msr_name = short + '_h'
l_msr_name = short + '_l'
m_msr_name = short + '_m'
r_msr_name = short + '_r'
x_msr_name = short + '_x'
y_msr_name = short + '_y'
# ----------------------------------------------------------------------
# prepare counters/dictionaries
# ----------------------------------------------------------------------
msr_switch = False
stn_switch = False
msr_line = None
stn_line = None
desc_index = None
line_count = 0
mandatory_coord_types = 'PLHh'
l_count = 0
h_count = 0
r_count = 0
g_count = 0
x_count = 0
y_count = 0
d_count = 0
d_set = 0
e_count = 0
m_count = 0
b_count = 0
l_msrs = {}
h_msrs = {}
r_msrs = {}
g_msrs = {}
x_msrs = {}
y_msrs = {}
d_msrs = {}
m_msrs = {}
e_msrs = {}
b_msrs = {}
stns = {}
# ----------------------------------------------------------------------
# read adj results
# ----------------------------------------------------------------------
adj_fh = open(adj_file, 'r')
print(" reading adj results ...")
for line in adj_fh:
line_count += 1
if 'Adjusted Measurements' in line:
msr_line = line_count + 5
if msr_line:
if line_count == msr_line:
msr_switch = True
if 'Adjusted Coordinates' in line:
stn_line = line_count + 5
if stn_line:
if line_count == stn_line:
stn_switch = True
if line_count == stn_line - 2:
desc_index = line.find('Description')
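# Parsing assumptions made above: data records start five lines after each
# section heading, and the column-header line (which locates 'Description')
# sits two lines before the first station record.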
# the value field starts at column 35; match on the label prefix so padding does not matter
if line.startswith('Reference frame:'):
ref_frame = line[35:].strip()
if line.startswith('Station coordinate types:'):
coord_types = line[35:].strip()
for l in mandatory_coord_types:
if l not in coord_types:
print('*' * 20)
print(' Warning! Mandatory coordinate types not present in {:s}'.format(adj_file))
print(' .adj file must contain coord types PLHh')
print('')
print('Exiting...')
exit()
if msr_switch:
msrType = line[0:1]
if msrType == 'L':
l_count += 1
station1 = line[2:22].strip()
station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
msr = data[0]
adj = data[1]
cor = data[2]
msr_SD = data[3]
adj_SD = data[4]
res = data[5]
nStat = data[6]
pelzer = data[7]
PreAdjCor = data[8]
Outlier = line[204:205]
l_msrs[l_count] = {
'Stn1': station1,
'Stn2': station2,
'Msr': msr,
'Msr_SD': msr_SD,
'Adj': adj,
'Cor': cor,
'nStat': nStat,
'outlier':Outlier,
}
continue
if msrType == 'B':
b_count += 1
station1 = line[2:22].strip()
station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
msr = data[0]
adj = data[1]
cor = data[2]
msr_SD = data[3]
adj_SD = data[4]
res = data[5]
nStat = data[6]
pelzer = data[7]
PreAdjCor = data[8]
Outlier = line[204:205]
b_msrs[b_count] = {
'Stn1': station1,
'Stn2': station2,
'Msr': msr,
'Msr_SD': msr_SD,
'Adj': adj,
'Cor': cor,
'nStat': nStat,
'outlier': Outlier,
}
continue
if msrType == 'E':
e_count += 1
station1 = line[2:22].strip()
station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
msr = data[0]
adj = data[1]
cor = data[2]
msr_SD = data[3]
adj_SD = data[4]
res = data[5]
nStat = data[6]
pelzer = data[7]
PreAdjCor = data[8]
Outlier = line[204:205]
e_msrs[e_count] = {
'Stn1': station1,
'Stn2': station2,
'Msr': msr,
'Msr_SD': msr_SD,
'Adj': adj,
'Cor': cor,
'nStat': nStat,
'outlier': Outlier,
}
continue
if msrType == 'M':
m_count += 1
station1 = line[2:22].strip()
station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
msr = data[0]
adj = data[1]
cor = data[2]
msr_SD = data[3]
adj_SD = data[4]
res = data[5]
nStat = data[6]
pelzer = data[7]
PreAdjCor = data[8]
Outlier = line[204:205]
m_msrs[m_count] = {
'Stn1': station1,
'Stn2': station2,
'Msr': msr,
'Msr_SD': msr_SD,
'Adj': adj,
'Cor': cor,
'nStat': nStat,
'outlier': Outlier,
}
continue
if msrType == 'G':
g_count += 1
station1 = line[2:22].strip()
station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
# reset lists if first in tuple
if g_count % 3 == 1:
g_msr = []
g_adj = []
g_cor = []
g_msr_SD = []
g_adj_SD = []
g_res = []
g_nStat = []
g_pelzer = []
g_PreAdjCor = []
g_Outlier = []
g_msr.append(data[0])
g_adj.append(data[1])
g_cor.append(float(data[2]))
g_msr_SD.append(data[3])
g_adj_SD.append(data[4])
g_res.append(data[5])
g_nStat.append(float(data[6]))
try:
g_pelzer.append(data[7])
except:
print(line)
print(data)
exit()
g_PreAdjCor.append(data[8])
g_Outlier.append(line[204:205])
# write to dictionary if 3rd in tuple
if g_count % 3 == 0:
# find the nStat with the largest absolute value
max_nStat = 0.0
for n in g_nStat:
if abs(n) > abs(max_nStat):
max_nStat = n
# find the correction with the largest absolute value
max_Cor = 0.0
for c in g_cor:
if abs(c) > abs(max_Cor):
max_Cor = c
# write to dictionary (integer key: one entry per XYZ triple)
g_msrs[g_count // 3] = {
'Stn1': station1,
'Stn2': station2,
'Msr_X': g_msr[0],
'Msr_Y': g_msr[1],
'Msr_Z': g_msr[2],
'Msr_SD_X': g_msr_SD[0],
'Msr_SD_Y': g_msr_SD[1],
'Msr_SD_Z': g_msr_SD[2],
'Adj_X': g_adj[0],
'Adj_Y': g_adj[1],
'Adj_Z': g_adj[2],
'Cor_X': g_cor[0],
'Cor_Y': g_cor[1],
'Cor_Z': g_cor[2],
'Max_Cor': max_Cor,
'nStat_X': g_nStat[0],
'nStat_Y': g_nStat[1],
'nStat_Z': g_nStat[2],
'Max_nStat': max_nStat,
'outlier_X': g_Outlier[0],
'outlier_Y': g_Outlier[1],
'outlier_Z': g_Outlier[2]
}
continue
if msrType == 'X':
x_count += 1
station1 = line[2:22].strip()
station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
# reset lists if first in tuple
if x_count % 3 == 1:
x_msr = []
x_adj = []
x_cor = []
x_msr_SD = []
x_adj_SD = []
x_res = []
x_nStat = []
x_pelzer = []
x_PreAdjCor = []
x_Outlier = []
x_msr.append(data[0])
x_adj.append(data[1])
x_cor.append(float(data[2]))
x_msr_SD.append(data[3])
x_adj_SD.append(data[4])
x_res.append(data[5])
x_nStat.append(float(data[6]))
x_pelzer.append(data[7])
x_PreAdjCor.append(data[8])
x_Outlier.append(line[204:205])
# write to dictionary if 3rd in tuple
if x_count % 3 == 0:
# find the nStat with the largest absolute value
max_nStat = 0.0
for n in x_nStat:
if abs(n) > abs(max_nStat):
max_nStat = n
# find the correction with the largest absolute value
max_Cor = 0.0
for c in x_cor:
if abs(c) > abs(max_Cor):
max_Cor = c
# write to dictionary (integer key: one entry per XYZ triple)
x_msrs[x_count // 3] = {
'Stn1': station1,
'Stn2': station2,
'Msr_X': x_msr[0],
'Msr_Y': x_msr[1],
'Msr_Z': x_msr[2],
'Msr_SD_X': x_msr_SD[0],
'Msr_SD_Y': x_msr_SD[1],
'Msr_SD_Z': x_msr_SD[2],
'Adj_X': x_adj[0],
'Adj_Y': x_adj[1],
'Adj_Z': x_adj[2],
'Cor_X': x_cor[0],
'Cor_Y': x_cor[1],
'Cor_Z': x_cor[2],
'Max_Cor': max_Cor,
'nStat_X': x_nStat[0],
'nStat_Y': x_nStat[1],
'nStat_Z': x_nStat[2],
'Max_nStat': max_nStat,
'outlier_X': x_Outlier[0],
'outlier_Y': x_Outlier[1],
'outlier_Z': x_Outlier[2]
}
continue
if msrType == 'Y':
y_count += 1
station1 = line[2:22].strip()
# station2 = line[22:42].strip()
flag = line[62:63]
data = line[67:].split()
# reset lists if first in tuple
if y_count % 3 == 1:
y_msr = []
y_adj = []
y_cor = []
y_msr_SD = []
y_adj_SD = []
y_res = []
y_nStat = []
y_pelzer = []
y_PreAdjCor = []
y_Outlier = []
y_msr.append(data[0])
y_adj.append(data[1])
y_cor.append(float(data[2]))
y_msr_SD.append(data[3])
y_adj_SD.append(data[4])
y_res.append(data[5])
y_nStat.append(float(data[6]))
y_pelzer.append(data[7])
y_PreAdjCor.append(data[8])
y_Outlier.append(line[204:205])
# write to dictionary if 3rd in tuple
if y_count % 3 == 0:
# find the nStat with the largest absolute value
max_nStat = 0.0
for n in y_nStat:
if abs(n) > abs(max_nStat):
max_nStat = n
# find the correction with the largest absolute value
max_Cor = 0.0
for c in y_cor:
if abs(c) > abs(max_Cor):
max_Cor = c
# write to dictionary (integer key: one entry per XYZ triple)
y_msrs[y_count // 3] = {
'Stn1': station1,
'Msr_X': y_msr[0],
'Msr_Y': y_msr[1],
'Msr_Z': y_msr[2],
'Msr_SD_X': y_msr_SD[0],
'Msr_SD_Y': y_msr_SD[1],
'Msr_SD_Z': y_msr_SD[2],
'Adj_X': y_adj[0],
'Adj_Y': y_adj[1],
'Adj_Z': y_adj[2],
'Cor_X': y_cor[0],
'Cor_Y': y_cor[1],
'Cor_Z': y_cor[2],
'Max_Cor': max_Cor,
'nStat_X': y_nStat[0],
'nStat_Y': y_nStat[1],
'nStat_Z': y_nStat[2],
'Max_nStat': max_nStat,
'outlier_X': y_Outlier[0],
'outlier_Y': y_Outlier[1],
'outlier_Z': y_Outlier[2]
}
continue
if msrType == 'H':
h_count += 1
station1 = line[2:22].strip()
flag = line[62:63]
data = line[67:].split()
msr = data[0]
adj = data[1]
cor = data[2]
msr_SD = data[3]
adj_SD = data[4]
res = data[5]
nStat = data[6]
pelzer = data[7]
PreAdjCor = data[8]
Outlier = line[204:205]
# fields assumed to mirror the other single-value measurement types above
h_msrs[h_count] = {
'Stn1': station1,
'Msr': msr,
'Msr_SD': msr_SD,
'Adj': adj,
'Cor': cor,
'nStat': nStat,
'outlier': Outlier,
}
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import pax
import pytest
# def test_batchnorm_train():
# bn = pax.BatchNorm(
# 3, True, True, 0.9, reduced_axes=[0, 1], param_shape=[1, 1, 3]
# )
# bn = pax.enable_train_mode(bn)
# x = jnp.ones((1, 10, 3))
# old_state = bn.ema_var.averages
# y = bn(x)
# new_state = bn.ema_var.averages
# chex.assert_tree_all_equal_shapes(old_state, new_state)
# chex.assert_tree_all_finite(new_state)
# assert y.shape == (1, 10, 3)
def test_batchnorm1D_train():
bn = pax.BatchNorm1D(3, True, True, 0.9)
bn = pax.enable_train_mode(bn)
x = jnp.ones((1, 10, 3))
old_state = bn.ema_mean.averages
bn, y = pax.purecall(bn, x)
new_state = bn.ema_mean.averages
chex.assert_tree_all_equal_shapes(old_state, new_state)
chex.assert_tree_all_finite(new_state)
assert y.shape == (1, 10, 3)
def test_batchnorm2D_train():
bn = pax.BatchNorm2D(3, True, True, 0.9)
bn = pax.enable_train_mode(bn)
x = jnp.ones((1, 10, 8, 3))
old_state = bn.scale
bn, y = pax.purecall(bn, x)
new_state = bn.scale
chex.assert_tree_all_equal_shapes(old_state, new_state)
chex.assert_tree_all_finite(new_state)
assert y.shape == (1, 10, 8, 3)
# def test_batchnorm_eval():
# bn = pax.BatchNorm(
# 3, True, True, 0.9, reduced_axes=[0, 1], param_shape=[1, 1, 3]
# )
# bn = pax.enable_eval_mode(bn)
# x = jnp.ones((1, 10, 3))
# old_state = bn.ema_mean
# y = bn(x)
# new_state = bn.ema_mean
# assert y.shape == (1, 10, 3)
# assert old_state == new_state
# def test_batchnorm_params_filter():
# bn = pax.BatchNorm(
# 3, True, True, 0.9, reduced_axes=[0, 1], param_shape=[1, 1, 3]
# )
# params = pax.select_parameters(bn)
# bn = bn.update_parameters(params)
def test_conv_1d_basic():
conv = pax.Conv1D(3, 5, 3, padding="SAME", with_bias=False)
x = jnp.ones((1, 10, 3), dtype=jnp.float32)
y = conv(x)
assert y.shape == (1, 10, 5)
def test_conv_2d_basic():
conv = pax.Conv2D(3, 5, 3, padding="SAME", with_bias=True)
x = jnp.ones((1, 10, 10, 3), dtype=jnp.float32)
y = conv(x)
assert y.shape == (1, 10, 10, 5)
def test_layer_norm_1():
"""Make sure our LayerNorm behaves the same as hk.LayerNorm."""
layer_norm = pax.LayerNorm(3, -1, True, True)
print(layer_norm.summary())
x = np.random.randn(32, 3).astype(np.float32)
fwd = hk.transform(lambda x: hk.LayerNorm(-1, True, True)(x))
rng = jax.random.PRNGKey(42)
params = fwd.init(rng, x)
np.testing.assert_array_equal(layer_norm.scale, params["layer_norm"]["scale"])
np.testing.assert_array_equal(layer_norm.offset, params["layer_norm"]["offset"])
y1 = fwd.apply(params, None, x)
y2 = layer_norm(x)
np.testing.assert_array_equal(y1, y2)
def test_layer_norm_2():
"""Make sure our LayerNorm behaves the same as hk.LayerNorm."""
layer_norm = pax.LayerNorm(3, -1, False, False)
print(layer_norm.summary())
x = np.random.randn(32, 3).astype(np.float32)
fwd = hk.transform(lambda x: hk.LayerNorm(-1, False, False)(x))
rng = jax.random.PRNGKey(42)
params = fwd.init(rng, x)
# np.testing.assert_array_equal(layer_norm.scale, params["layer_norm"]["scale"])
# np.testing.assert_array_equal(layer_norm.offset, params["layer_norm"]["offset"])
y1 = fwd.apply(params, None, x)
y2 = layer_norm(x)
np.testing.assert_array_equal(y1, y2)
def test_layer_norm_init():
"""Make sure our LayerNorm behaves the same as hk.LayerNorm."""
layer_norm = pax.LayerNorm(
3,
-1,
True,
True,
scale_init=jax.nn.initializers.normal(),
offset_init=jax.nn.initializers.normal(),
)
x = np.empty((32, 3), dtype=np.float32)
fwd = hk.transform(
lambda x: hk.LayerNorm(
-1,
True,
True,
scale_init=hk.initializers.RandomNormal(),
offset_init=hk.initializers.TruncatedNormal(),
)(x)
)
rng = jax.random.PRNGKey(42)
params = fwd.init(rng, x)
chex.assert_equal_shape((layer_norm.scale, params["layer_norm"]["scale"]))
chex.assert_equal_shape((layer_norm.offset, params["layer_norm"]["offset"]))
def test_group_norm_1():
"""Make sure our GroupNorm behaves the same as hk.GroupNorm."""
group_norm = pax.GroupNorm(8, 32, -1)
x = np.random.randn(32, 4, 4, 32).astype(np.float32)
fwd = hk.transform(lambda x: hk.GroupNorm(8, -1, True, True)(x))
rng = jax.random.PRNGKey(42)
params = fwd.init(rng, x)
np.testing.assert_array_equal(group_norm.scale, params["group_norm"]["scale"])
np.testing.assert_array_equal(group_norm.offset, params["group_norm"]["offset"])
o1 = group_norm(x)
o2 = fwd.apply(params, rng, x)
np.testing.assert_array_equal(o1, o2)
def test_linear_computation():
fc = pax.Linear(1, 1)
x = jnp.array([[5.0]], dtype=jnp.float32)
y = fc(x)
target = x * fc.weight + fc.bias
assert target.item() == y.item()
def test_linear():
fc = pax.Linear(5, 7)
x = jnp.zeros((32, 5), dtype=jnp.float32)
y = fc(x)
assert y.shape == (32, 7)
assert fc.bias is not None
def test_linear_1():
fc = pax.Linear(5, 7, b_init=jax.nn.initializers.normal())
rng_key = jax.random.PRNGKey(42)
x = jax.random.normal(rng_key, (32, 5), dtype=jnp.float32)
y = fc(x)
expected_y = jnp.matmul(x, fc.weight) + fc.bias
np.testing.assert_allclose(expected_y, y)
def test_linear_wo_bias():
fc = pax.Linear(5, 7, with_bias=False)
x = jnp.zeros((32, 5), dtype=jnp.float32)
y = fc(x)
assert y.shape == (32, 7)
assert not hasattr(fc, "bias")
def test_linear_input_shape_error():
fc = pax.Linear(2, 3, b_init=jax.nn.initializers.normal())
rng_key = jax.random.PRNGKey(42)
x = jax.random.normal(rng_key, (2,), dtype=jnp.float32)
with pytest.raises(AssertionError):
y = fc(x)
def test_sequential_mix():
net = pax.Sequential(pax.Linear(1, 2), jax.nn.relu, pax.Linear(2, 3))
params = net.parameters()
x = jnp.zeros((2, 1))
y = net(x)
assert y.shape == (2, 3)
# def test_sequential_non_mix():
# net = pax.Sequential(
# pax.Linear(1, 2),
# pax.BatchNorm(2, True, True, 0.99, reduced_axes=[0], param_shape=[1, 2]),
# pax.Linear(2, 3),
# )
# params = net.parameters()
# x = jnp.zeros((2, 1))
# y = net(x)
# assert y.shape == (2, 3)
def test_sequential_all_jax():
net = pax.Sequential(jax.nn.relu, jax.nn.relu, jax.nn.relu)
params = net.parameters()
x = jnp.zeros((2, 1))
y = net(x)
assert y.shape == (2, 1)
def test_conv_no_bias():
conv = pax.Conv2D(3, 3, 3, 1, 1, "SAME", False)
# assert conv.bias == None and "bias" not in conv.pax.name_to_kind
def test_native_conv1d_1():
rng_key = jax.random.PRNGKey(42)
conv1d = pax.Conv1D(
in_features=3,
out_features=5,
kernel_shape=3,
stride=1,
rate=1,
padding=[(1, 1)],
with_bias=True,
data_format="NWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 6, 3))
y = conv1d(x)
assert y.shape == (2, 6, 5)
def test_native_conv1d_2():
rng_key = jax.random.PRNGKey(42)
conv1d = pax.Conv1D(
in_features=7,
out_features=5,
kernel_shape=3,
stride=1,
rate=1,
padding=[(1, 1)],
with_bias=False,
data_format="NWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 6, 7))
y = conv1d(x)
assert y.shape == (2, 6, 5)
hk_conv = hk.transform(
lambda x: hk.Conv1D(5, 3, 1, 1, [(1, 1)], False, data_format="NWC")(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv1_d": {"w": conv1d.weight}}, rng_key, x)
np.testing.assert_allclose(y, hk_y)
def test_native_conv1d_3():
rng_key = jax.random.PRNGKey(42)
conv1d = pax.Conv1D(
in_features=7,
out_features=5,
kernel_shape=3,
stride=2,
rate=1,
padding=[(1, 1)],
with_bias=False,
data_format="NWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 6, 7))
y = conv1d(x)
hk_conv = hk.transform(
lambda x: hk.Conv1D(5, 3, 2, 1, [(1, 1)], False, data_format="NWC")(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv1_d": {"w": conv1d.weight}}, rng_key, x)
assert params["conv1_d"]["w"].shape == conv1d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv1d_4():
rng_key = jax.random.PRNGKey(42)
conv1d = pax.Conv1D(
in_features=7,
out_features=5,
kernel_shape=30,
stride=2,
rate=3,
padding="SAME",
with_bias=False,
data_format="NWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 6, 7))
y = conv1d(x)
hk_conv = hk.transform(
lambda x: hk.Conv1D(5, 30, 2, 3, "SAME", False, data_format="NWC")(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv1_d": {"w": conv1d.weight}}, rng_key, x)
assert params["conv1_d"]["w"].shape == conv1d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv1d_5():
rng_key = jax.random.PRNGKey(42)
conv1d = pax.Conv1D(
in_features=8,
out_features=6,
kernel_shape=30,
stride=2,
rate=3,
padding="SAME",
with_bias=False,
data_format="NWC",
feature_group_count=2,
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 6, 8))
y = conv1d(x)
hk_conv = hk.transform(
lambda x: hk.Conv1D(
6, 30, 2, 3, "SAME", False, data_format="NWC", feature_group_count=2
)(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv1_d": {"w": conv1d.weight}}, rng_key, x)
assert params["conv1_d"]["w"].shape == conv1d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv2d_1():
rng_key = jax.random.PRNGKey(42)
conv2d = pax.Conv2D(
in_features=3,
out_features=5,
kernel_shape=3,
stride=1,
rate=1,
padding=[(1, 1), (1, 1)],
with_bias=True,
data_format="NHWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 6, 7, 3))
y = conv2d(x)
assert y.shape == (2, 6, 7, 5)
def test_native_conv2d_2():
rng_key = jax.random.PRNGKey(11)
conv2d = pax.Conv2D(
in_features=7,
out_features=5,
kernel_shape=(20, 30),
stride=(2, 3),
rate=(2, 3),
padding="SAME",
with_bias=False,
data_format="NHWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 40, 60, 7))
y = conv2d(x)
hk_conv = hk.transform(
lambda x: hk.Conv2D(
5, (20, 30), (2, 3), (2, 3), "SAME", False, data_format="NHWC"
)(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv2_d": {"w": conv2d.weight}}, rng_key, x)
assert params["conv2_d"]["w"].shape == conv2d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv2d_3():
rng_key = jax.random.PRNGKey(55)
conv2d = pax.Conv2D(
in_features=7,
out_features=5,
kernel_shape=(20, 30),
stride=(2, 3),
rate=(2, 3),
padding="VALID",
with_bias=False,
data_format="NHWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 40, 60, 7))
y = conv2d(x)
hk_conv = hk.transform(
lambda x: hk.Conv2D(
5, (20, 30), (2, 3), (2, 3), "VALID", False, data_format="NHWC"
)(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv2_d": {"w": conv2d.weight}}, rng_key, x)
assert params["conv2_d"]["w"].shape == conv2d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv2d_4():
rng_key = jax.random.PRNGKey(66)
conv2d = pax.Conv2D(
in_features=7,
out_features=5,
kernel_shape=(20, 30),
stride=(1, 1),
rate=(2, 3),
padding="VALID",
with_bias=False,
data_format="NHWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 40, 60, 7))
y = conv2d(x)
hk_conv = hk.transform(
lambda x: hk.Conv2D(
5, (20, 30), (1, 1), (2, 3), "VALID", False, data_format="NHWC"
)(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv2_d": {"w": conv2d.weight}}, rng_key, x)
assert params["conv2_d"]["w"].shape == conv2d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv2d_5():
rng_key = jax.random.PRNGKey(99)
conv2d = pax.Conv2D(
in_features=7,
out_features=5,
kernel_shape=(10, 20),
stride=(1, 1),
rate=(1, 3),
padding="VALID",
with_bias=False,
data_format="NHWC",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 40, 60, 7))
y = conv2d(x)
hk_conv = hk.transform(
lambda x: hk.Conv2D(
5, (10, 20), (1, 1), (1, 3), "VALID", False, data_format="NHWC"
)(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv2_d": {"w": conv2d.weight}}, rng_key, x)
assert params["conv2_d"]["w"].shape == conv2d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv2d_6():
rng_key = jax.random.PRNGKey(40)
conv2d = pax.Conv2D(
in_features=7,
out_features=5,
kernel_shape=(10, 20),
stride=(1, 1),
rate=(1, 3),
padding="VALID",
with_bias=False,
data_format="NCHW",
rng_key=rng_key,
)
x = jax.random.normal(rng_key, (2, 7, 40, 60))
y = conv2d(x)
hk_conv = hk.transform(
lambda x: hk.Conv2D(
5, (10, 20), (1, 1), (1, 3), "VALID", False, data_format="NCHW"
)(x),
)
params = hk_conv.init(rng_key, x)
hk_y = hk_conv.apply({"conv2_d": {"w": conv2d.weight}}, rng_key, x)
assert params["conv2_d"]["w"].shape == conv2d.weight.shape
np.testing.assert_allclose(y, hk_y)
def test_native_conv2d_7():
rng_key =
#!/bin/env python3
# import os
# os.environ['PYTHONASYNCIODEBUG'] = '1'
# import logging
# logging.getLogger('asyncio').setLevel(logging.DEBUG)
from datetime import datetime
import traceback
import atexit
import argparse
import os
from os import path
import sys
import logging
from struct import pack
import random
from time import time, sleep, perf_counter
from socket import socket
from configparser import ConfigParser
from shutil import which
from asyncio import sleep, Protocol, get_event_loop, Task
from pickle import dumps
import csv
from ..consts import BUILD_TIMESTAMP_VARNAME
from ..util import version, resolve, create_process, kill_all_processes, gcd
from ..util import verbose as util_verbose
from ..lib import AckTimeout, ClientProtocolMixin, SamplerSample
from ..varsfile import merge_vars_from_file_and_list
from ..dwarfutil import read_elf_variables
logger = logging.getLogger()
module_dir = os.path.dirname(os.path.realpath(__file__))
pc_dir = os.path.join(module_dir, '..', '..', '..', 'examples', 'pc_platform')
pc_executable = os.path.join(pc_dir, 'pc')
def start_fake_bench(port):
# build_timestamp_value has no default in start_fake_sine(); 0 is assumed as a harmless dummy for the bench fake
return start_fake_sine(ticks_per_second=0, port=port, build_timestamp_value=0)
def start_fake_sine(ticks_per_second, port, build_timestamp_value):
# Run in a separate process so it doesn't hog the CPython lock
# Use our executable to work with a development environment (python executable)
# or pyinstaller (emotool.exe)
if sys.argv[0].endswith(path.basename(get_python_executable())):
cmdline = sys.argv[:2]
elif path.isfile(sys.argv[0]) or path.isfile(sys.argv[0] + '.exe'):
cmdline = [sys.argv[0]]
elif which(sys.argv[0]):
cmdline = [sys.argv[0]]
# force usage of python if the first parameter is a python script; use extension as predicate
if cmdline[0].endswith('.py'):
cmdline = [get_python_executable()] + cmdline
#print("{sys_argv} ; which said {which}".format(sys_argv=repr(sys.argv), which=which(sys.argv[0]))
return create_process(cmdline + ['--embedded', '--ticks-per-second', str(ticks_per_second), '--port', str(port),
'--build-timestamp-value', str(build_timestamp_value)])
def start_pc(port, exe, debug):
exe = os.path.realpath(exe)
cmdline = [exe, str(port)]
cmdline_str = ' '.join(cmdline)
debug_cmdline = 'EMOLOG_PC_PORT={port} cgdb --args {cmdline_str}'.format(port=port, cmdline_str=cmdline_str)
os.environ['EMOLOG_PC_PORT'] = str(port)
if debug:
input("press enter once you ran pc with: {debug_cmdline}".format(debug_cmdline=debug_cmdline))
return
return create_process(cmdline)
def iterate(prefix, initial):
while True:
yield '{}_{:03}.csv'.format(prefix, initial)
initial += 1
def next_available(folder, prefix):
filenames = iterate(prefix, 1)
for filename in filenames:
candidate = os.path.join(folder, filename)
if not os.path.exists(candidate):
return candidate
def setup_logging(filename, silent):
if silent:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.DEBUG)
if filename:
file_handler = logging.FileHandler(filename=filename)
file_handler.setLevel(level=logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
stream_formatter = logging.Formatter('%(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(level=logging.INFO)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(stream_handler)
logger.debug('debug first')
logger.info('info first')
def start_serial_process(serialurl, baudrate, hw_flow_control, port):
"""
Block until serial2tcp is ready to accept a connection
"""
serial2tcp_cmd = create_python_process_cmdline('serial2tcp.py')
if hw_flow_control is True:
serial2tcp_cmd += ['-r']
serial2tcp_cmd += ' -b {} -p {} -P {}'.format(baudrate, serialurl, port).split()
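# resulting command, with illustrative device/port values:
# [sys.executable, '<module_dir>/serial2tcp.py', '-r', '-b', '8000000', '-p', 'COM3', '-P', '43210']
# ('-r' only when hw_flow_control is True)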
serial_subprocess = create_process(serial2tcp_cmd)
return serial_subprocess
def create_python_process_cmdline(script):
script_path = os.path.join(module_dir, script)
return [sys.executable, script_path]
def create_python_process_cmdline_command(command):
return [sys.executable, '-c', command]
class EmoToolClient(ClientProtocolMixin):
def __init__(self, ticks_per_second, verbose, dump, debug, csv_writer_factory=None):
if debug:
print("timeout set to one hour for debugging (gdb)")
ClientProtocolMixin.ACK_TIMEOUT_SECONDS = 3600.0
super().__init__(verbose=verbose, dump=dump,
ticks_per_second=ticks_per_second,
csv_writer_factory=csv_writer_factory)
@property
def running(self):
return self.cylib.running()
@property
def ticks_lost(self):
return self.cylib.csv_handler.ticks_lost
@property
def samples_received(self):
return self.cylib.csv_handler.samples_received
@property
def csv_filename(self):
return self.cylib.csv_handler.csv_filename
def reset(self, *args, **kw):
self.last_samples_received = None # don't trigger the check_progress() watchdog on the next sample
self.cylib.csv_handler.reset(*args, **kw)
def register_listener(self, *args, **kw):
self.cylib.csv_handler.register_listener(*args, **kw)
def data_received(self, data):
self.cylib.data_received(data)
async def start_transport(client, args):
loop = get_event_loop()
port = random.randint(10000, 50000)
if args.fake is not None:
if args.fake == 'gen':
start_fake_sine(ticks_per_second=args.ticks_per_second, port=port, build_timestamp_value=args.fake_gen_build_timestamp_value)
elif args.fake == 'bench':
start_fake_bench(port)
elif args.fake == 'pc' or os.path.exists(args.fake):
exe = pc_executable if args.fake == 'pc' else args.fake
start_pc(port=port, exe=exe, debug=args.debug)
else:
print("error: unfinished support for fake {fake}".format(fake=args.fake))
raise SystemExit
else:
start_serial_process(serialurl=args.serial, baudrate=args.baud, hw_flow_control=args.hw_flow_control, port=port)
attempt = 0
while attempt < 10:
attempt += 1
await sleep(0.1)
s = socket()
try:
s.connect(('127.0.0.1', port))
except:
pass
else:
break
client_transport, client2 = await loop.create_connection(lambda: client, sock=s)
assert client2 is client
args = None
def cancel_outstanding_tasks():
for task in Task.all_tasks():
logger.warning('canceling task {}'.format(task))
task.cancel()
def windows_try_getch():
import msvcrt
if msvcrt.kbhit():
return msvcrt.getch()
return None # be explicit
if sys.platform == 'win32':
try_getch_message = "Press any key to stop capture early..."
try_getch = windows_try_getch
else:
try_getch_message = "Press Ctrl-C to stop capture early..."
def try_getch():
return None
async def cleanup(args, client):
if not hasattr(client, 'transport') or client.transport is None:
cancel_outstanding_tasks()
return
if not args.no_cleanup:
logger.info("sending sampler stop")
try:
await client.send_sampler_stop()
except:
logger.info("exception when sending sampler stop in cleanup()")
client.exit_gracefully()
if client.transport is not None:
client.transport.close()
kill_all_processes()
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Emolog protocol capture tool. Implements emolog client side, captures a given set of variables to a csv file')
parser.add_argument('--fake', # TODO: can I have a hook for choices? i.e. choices=ChoicesOrExecutable['gen', 'pc', 'bench'],
help='debug only - fake a client - either generated or pc controller')
now_timestamp = int(datetime.now().timestamp() * 1000)
parser.add_argument('--fake-elf-build-timestamp-value', type=int, default=now_timestamp, help='debug only - fake build timestamp value (address is fixed)')
parser.add_argument('--fake-gen-build-timestamp-value', type=int, default=now_timestamp, help='debug only - fake build timestamp value (address is fixed)')
parser.add_argument('--serial', default='auto', help='serial URL or device name') # see http://pythonhosted.org/pyserial/pyserial_api.html#serial.serial_for_url
parser.add_argument('--baud', default=8000000, help='baudrate, using RS422 up to 12000000 theoretically', type=int)
parser.add_argument('--hw_flow_control', default=False, action='store_true', help='use CTS/RTS signals for flow control')
parser.add_argument('--elf', default=None, help='elf executable running on embedded side')
parser.add_argument('--var', default=[], action='append',
help='add a single var, example "foo,1,0" = "varname,ticks,tickphase"')
parser.add_argument('--snapshotfile', help='file containing variable definitions to be taken once at startup')
parser.add_argument('--varfile', help='file containing variable definitions, identical to multiple --var calls')
group = parser.add_mutually_exclusive_group()
group.add_argument('--out', help='Output file name. ".csv" extension is added if missing. '
'File is overwritten if already exists.')
group.add_argument('--out_prefix', default='emo', help='Output file prefix. Output is saved to the first free '
'(not already existing) file of the format "prefix_xxx.csv", '
'where xxx is a sequential number starting from "001"')
parser.add_argument('--csv-factory', help='advanced: module[.module]*.function to use as factory for csv file writing', default=None)
parser.add_argument('--verbose', default=True, action='store_false', dest='silent',
help='turn on verbose logging; affects performance under windows')
parser.add_argument('--verbose-kill', default=False, action='store_true')
parser.add_argument('--log', default=None, help='log messages and other debug/info logs to this file')
parser.add_argument('--runtime', type=float, default=3.0, help='quit after given seconds. use 0 for endless run.')
parser.add_argument('--no-cleanup', default=False, action='store_true', help='do not stop sampler on exit')
parser.add_argument('--dump')
parser.add_argument('--ticks-per-second', default=1000000 / 50, type=float,
help='number of ticks per second. used in conjunction with runtime')
parser.add_argument('--debug', default=False, action='store_true', help='produce more verbose debugging output')
# Server - used for GUI access
parser.add_argument('--listen', default=None, type=int, help='enable listening TCP port for samples') # later: add a command interface, making this suitable for interactive GUI
parser.add_argument('--gui', default=False, action='store_true', help='launch graphing gui in addition to saving')
# Embedded
parser.add_argument('--embedded', default=False, action='store_true', help='debugging: be a fake embedded target')
parser.add_argument('--check-timestamp', action='store_true', default=False, help='wip off by default for now')
ret, unparsed = parser.parse_known_args(args=args)
if ret.fake is None:
if not ret.elf and not ret.embedded:
# elf required unless fake_sine in effect
parser.print_usage()
print("{e}: error: the following missing argument is required: --elf".format(e=sys.argv[0]))
raise SystemExit
else:
if ret.fake == 'gen':
# fill in fake vars
ret.var = [
# name, ticks, phase
'a,1,0',
'b,1,0',
'c,1,0',
'd,1,0',
'e,1,0',
'f,1,0',
'g,1,0',
'h,1,0',
]
else:
if ret.elf is None:
if ret.fake == 'pc':
if not os.path.exists(pc_executable):
print("missing pc ELF file: {e}".format(e=pc_executable))
raise SystemExit
ret.elf = pc_executable
else:
ret.elf = ret.fake
if ret.varfile is None:
ret.varfile = os.path.join(module_dir, '..', '..', 'vars.csv')
ret.snapshotfile = os.path.join(module_dir, '..', '..', 'snapshot_vars.csv')
return ret
def bandwidth_calc(args, variables):
"""
:param variables: list of dictionaries
:return: average baud rate (considering 8 data bits, 1 start & stop bits)
"""
packets_per_second = args.ticks_per_second # simplification: assume a packet every tick (upper bound)
header_average = packets_per_second * SamplerSample.empty_size()
payload_average = sum(args.ticks_per_second / v['period_ticks'] * v['size'] for v in variables)
return (header_average + payload_average) * 10
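# Rough worked example (illustrative numbers): with ticks_per_second=20000 and two
# variables of size 4 sampled every tick, payload_average = 2 * 20000 * 4 = 160000
# bytes/s, header_average = 20000 * SamplerSample.empty_size() bytes/s, and the sum
# is multiplied by 10 because each byte costs 8 data bits plus a start and a stop bit.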
async def initialize_board(client, variables):
logger.debug("about to send version")
await client.send_version()
retries = max_retries = 3
while retries > 0:
try:
logger.debug("about to send sampler stop")
await client.send_sampler_stop()
logger.debug("about to send sampler set variables")
await client.send_set_variables(variables)
logger.debug("about to send sampler start")
await client.send_sampler_start()
logger.debug("client initiated, starting to log data at rate TBD")
break
except AckTimeout:
retries -= 1
logger.info("Ack Timeout. Retry {}".format(max_retries - retries))
return retries != 0
def banner(s):
print("=" * len(s))
print(s)
print("=" * len(s))
async def run_client(args, client, variables, allow_kb_stop):
if not await initialize_board(client=client, variables=variables):
logger.error("Failed to initialize board, exiting.")
raise SystemExit
sys.stdout.flush()
logger.info('initialized board')
dt = 0.1 if args.runtime is not None else 1.0
if allow_kb_stop and try_getch_message:
print(try_getch_message)
client.start_logging_time = time()
while client.running:
if allow_kb_stop and try_getch():
break
await sleep(dt)
await client.send_sampler_stop()
async def record_snapshot(args, client, csv_filename, varsfile, extra_vars=None):
if extra_vars is None:
extra_vars = []
defs = merge_vars_from_file_and_list(filename=varsfile, def_lines=extra_vars)
names, variables = read_elf_variables(elf=args.elf, defs=defs, fake_build_timestamp=args.fake_elf_build_timestamp_value)
elf_by_name = {x['name']: x for x in variables}
client.reset(csv_filename=csv_filename, names=names, min_ticks=1, max_samples=1)
await run_client(args, client, variables, allow_kb_stop=False)
read_values = {}
try:
with open(csv_filename) as fd:
lines = list(csv.reader(fd))
except IOError as io:
logger.warning("snapshot failed, no file created")
lines = []  # assumed fallback: treat a missing/unreadable snapshot file as empty
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Save network structure into file.
'''
from collections import OrderedDict
import google.protobuf.text_format as text_format
import numpy
import os
import re
import shutil
import tempfile
import zipfile
from nnabla import save_parameters
from nnabla.logger import logger
from nnabla.parameter import get_parameters
from nnabla.utils import nnabla_pb2
from nnabla.utils.save_function import _create_function_nntxt
# ----------------------------------------------------------------------
# Helper functions
# ----------------------------------------------------------------------
def _get_unique_function_name(function_type, functions):
'''Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions (OrderedDict of (str, Function))
Returns: str
A unique function name
'''
function_name = function_name_base = function_type
count = 2
while function_name in functions:
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name
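# Example: if functions already contains 'Convolution' and 'Convolution_2',
# _get_unique_function_name('Convolution', functions) returns 'Convolution_3'.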
def _get_unique_variable_name(vname, variables):
'''Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name
'''
count = 2
vname_base = vname
while vname in variables:
vname = '{}_{}'.format(vname_base, count)
count += 1
return vname
def _get_variable_name_or_register(var, variables, names, params, prefix):
'''
Args:
var (~nnabla.Variable)
variables (OrderedDict)
names (dict): Force name table, Variable -> str
params (dict): NdArray -> str
prefix(str)
'''
if var not in variables.values():
vname = prefix
if var.data in params:
vname = params[var.data]
elif var in names:
vname = names[var]
vname = _get_unique_variable_name(vname, variables)
variables[vname] = var
else:
vname = list(variables.keys())[list(variables.values()).index(var)]
return vname
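# Naming priority implemented above: a registered parameter name wins, then a
# forced name from 'names', otherwise the caller-supplied prefix; the result is
# uniquified, e.g. 'Affine_Output', 'Affine_Output_2', ... (names illustrative).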
def _get_net_variables(net):
inputs = []
outputs = []
variables = {}
net_parameters = []
for v in net.variable:
variables[v.name] = v
if v.type == 'Parameter':
net_parameters.append(v.name)
for f in net.function:
for v in f.input:
if variables[v].type == 'Buffer':
inputs.append(v)
for v in f.output:
if variables[v].type == 'Buffer':
outputs.append(v)
net_inputs = list(set(inputs) - set(outputs))
net_outputs = list(set(outputs) - set(inputs))
return net_inputs, net_outputs, net_parameters
def _create_global_config(ctx):
g = nnabla_pb2.GlobalConfig()
g.default_context.backend = ctx.backend
g.default_context.array_class = ctx.array_class
g.default_context.device_id = ctx.device_id
g.default_context.compute_backend = ctx.compute_backend
return g
def _create_training_config(max_epoch, iter_per_epoch, save_best):
t = nnabla_pb2.TrainingConfig()
t.max_epoch = max_epoch
t.iter_per_epoch = iter_per_epoch
t.save_best = save_best
return t
def _create_dataset(name, uri, cache_dir, variables, shuffle, batch_size, no_image_normalization=False):
d = nnabla_pb2.Dataset()
d.name = name
d.uri = uri
if cache_dir is not None:
d.cache_dir = cache_dir
d.shuffle = shuffle
d.batch_size = batch_size
# accept the extra argument passed by create_proto(); assumes the Dataset message defines this field
d.no_image_normalization = no_image_normalization
d.variable.extend(variables)
return d
def _get_network_sink(outputs):
import nnabla.functions as F
outputs = [o for o in outputs.values()]
return F.sink(*outputs)
def _create_network(net):
n = nnabla_pb2.Network()
n.name = net['name']
n.batch_size = net['batch_size']
# List (dict: name -> Variable) of outputs.
outputs = net['outputs']
sink = _get_network_sink(outputs)
# Create force name table: Variable -> name.
names = {}
names.update(net['names'])
names.update(outputs)
# Reverse dict: Variable --> Name
names = {v: k for k, v in names.items()}
# Create table: NdArray -> str
# (Use Ndarray instead of Variable because parameter variable might be
# unlinked)
params = {v.data: k for k, v in get_parameters(grad_only=False).items()}
# ----------------------------------------------------------------------
# Parse graph to get variables and functions
# ----------------------------------------------------------------------
variables = OrderedDict()
functions = OrderedDict()
def collect_info(func):
# Collect information.
function_type = func.info.type_name
if function_type == 'Sink':
return
function_name = _get_unique_function_name(function_type, functions)
functions[function_name] = {
'type': function_type,
'args': func.info.args,
'inputs': [],
'outputs': []
}
for i in func.inputs:
base_name = '{}_Input'.format(function_name)
vname = _get_variable_name_or_register(
i, variables, names, params, base_name)
functions[function_name]['inputs'].append(vname)
for o in func.outputs:
base_name = '{}_Output'.format(function_name)
vname = _get_variable_name_or_register(
o, variables, names, params, base_name)
functions[function_name]['outputs'].append(vname)
sink.visit(collect_info)
# ----------------------------------------------------------------------
# Convert variables and functions into proto
# ----------------------------------------------------------------------
for name, variable in variables.items():
v = n.variable.add()
v.name = name
shape = list(numpy.array(variable.d).shape)
if variable.data in params:
v.type = 'Parameter'
else:
v.type = 'Buffer'
# TODO: The first dimension is always considered as batch size.
# No problem?
if len(shape) > 0:
shape[0] = -1
v.shape.dim.extend(shape)
# ----------------------------------------------------------------------
# Add info to variable
# ----------------------------------------------------------------------
# TODO: Only required for Parameter variables?
if variable.info:
i = v.initializer
i.type = variable.info.initializer.__class__.__name__.replace(
'Initializer', '')
i.multiplier = 0.0
if i.type == 'Constant':
i.multiplier = variable.info.initializer.value
elif i.type == 'Uniform':
i.multiplier = -variable.info.initializer.lim[0]
elif i.type == 'Normal':
i.multiplier = variable.info.initializer.sigma
else:
pass # TODO Error
for name, function in functions.items():
f = n.function.add()
_create_function_nntxt(f, name, function)
return n
def _create_optimizer(name, solver, network, dataset):
o = nnabla_pb2.Optimizer()
o.name = name
o.network_name = network.name
o.dataset_name = dataset.name
o.solver.type = re.sub(r'(|Cuda)$', '', str(solver.name))
if o.solver.type == 'Adadelta':
o.solver.adadelta_param.lr = solver.info['lr']
o.solver.adadelta_param.decay = solver.info['decay']
o.solver.adadelta_param.eps = solver.info['eps']
elif o.solver.type == 'Adagrad':
o.solver.adagrad_param.lr = solver.info['lr']
o.solver.adagrad_param.eps = solver.info['eps']
elif o.solver.type == 'Adam':
o.solver.adam_param.alpha = solver.info['alpha']
o.solver.adam_param.beta1 = solver.info['beta1']
o.solver.adam_param.beta2 = solver.info['beta2']
o.solver.adam_param.eps = solver.info['eps']
elif o.solver.type == 'Adamax':
o.solver.adamax_param.alpha = solver.info['alpha']
o.solver.adamax_param.beta1 = solver.info['beta1']
o.solver.adamax_param.beta2 = solver.info['beta2']
o.solver.adamax_param.eps = solver.info['eps']
elif o.solver.type == 'Momentum':
o.solver.momentum_param.lr = solver.info['lr']
o.solver.momentum_param.momentum = solver.info['momentum']
elif o.solver.type == 'Nesterov':
o.solver.nesterov_param.lr = solver.info['lr']
o.solver.nesterov_param.momentum = solver.info['momentum']
elif o.solver.type == 'RMSprop':
o.solver.rmsprop_param.lr = solver.info['lr']
o.solver.rmsprop_param.decay = solver.info['decay']
o.solver.rmsprop_param.eps = solver.info['eps']
elif o.solver.type == 'Sgd':
o.solver.sgd_param.lr = solver.info['lr']
inputs, outputs, params = _get_net_variables(network)
for n, inp in enumerate(inputs):
d = o.data_variable.add()
d.variable_name = inp
d.data_name = dataset.variable[n]
for out in outputs:
d = o.loss_variable.add()
d.variable_name = out
for param in params:
d = o.parameter_variable.add()
d.variable_name = param
d.learning_rate_multiplier = 1.0
return o
def _create_monitor(name, monitor, network, dataset):
m = nnabla_pb2.Monitor()
m.name = name
m.network_name = network.name
m.dataset_name = dataset.name
inputs, outputs, params = _get_net_variables(network)
for n, inp in enumerate(inputs):
d = m.data_variable.add()
d.variable_name = inp
d.data_name = dataset.variable[n]
for out in outputs:
d = m.monitor_variable.add()
d.type = 'Error'
d.variable_name = out
return m
def _create_executor(name, network, data, output, remap=None):
'''
'''
if remap is None:
remap = {}
e = nnabla_pb2.Executor()
e.name = name
e.network_name = network.name
_, _, params = _get_net_variables(network)
var_dict = {v.name: v for v in network.variable}
for vname in data:
try:
_ = var_dict[vname]
except KeyError:
raise KeyError("{} not found in {}".format(vname, network.name))
dv = e.data_variable.add()
dv.variable_name = vname
dv.data_name = remap.get(vname, vname)
for vname in output:
try:
_ = var_dict[vname]
except KeyError:
raise KeyError("{} not found in {}".format(vname, network.name))
ov = e.output_variable.add()
ov.variable_name = vname
ov.data_name = remap.get(vname, vname)
for param in params:
d = e.parameter_variable.add()
d.variable_name = param
return e
# ----------------------------------------------------------------------
# Helper functions (END)
# ----------------------------------------------------------------------
def create_proto(contents, include_params=False):
proto = nnabla_pb2.NNablaProtoBuf()
if 'global_config' in contents:
proto.global_config.MergeFrom(
_create_global_config(contents['global_config']['default_context'])
)
if 'training_config' in contents:
proto.training_config.MergeFrom(
_create_training_config(contents['training_config']['max_epoch'],
contents['training_config'][
'iter_per_epoch'],
contents['training_config']['save_best']))
networks = {}
if 'networks' in contents:
proto_nets = []
for net in contents['networks']:
networks[net['name']] = _create_network(net)
proto_nets.append(networks[net['name']])
proto.network.extend(proto_nets)
datasets = {}
if 'datasets' in contents:
proto_datasets = []
for d in contents['datasets']:
if 'cache_dir' in d:
cache_dir = d['cache_dir']
else:
cache_dir = None
datasets[d['name']] = _create_dataset(d['name'],
d['uri'],
cache_dir,
d['variables'],
d['shuffle'],
d['batch_size'],
d['no_image_normalization'])
proto_datasets.append(datasets[d['name']])
proto.dataset.extend(proto_datasets)
if 'optimizers' in contents:
proto_optimizers = []
for o in contents['optimizers']:
proto_optimizers.append(_create_optimizer(o['name'], o['solver'],
networks[o['network']],
datasets[o['dataset']]))
proto.optimizer.extend(proto_optimizers)
if 'monitors' in contents:
proto_monitors = []
for m in contents['monitors']:
proto_monitors.append(_create_monitor(m['name'], m['monitor'],
networks[m['network']],
datasets[m['dataset']]))
proto.monitor.extend(proto_monitors)
if 'executors' in contents:
proto_executors = []
for e in contents['executors']:
proto_executors.append(
_create_executor(e['name'], networks[e['network']],
e['data'], e['output'], e.get('remap', {})))
proto.executor.extend(proto_executors)
if include_params is True:
params = get_parameters(grad_only=False)
for variable_name, variable in params.items():
parameter = proto.parameter.add()
parameter.variable_name = variable_name
parameter.shape.dim.extend(variable.shape)
parameter.data.extend(numpy.array(variable.d).flatten().tolist())
parameter.need_grad = variable.need_grad
return proto
def save(filename, contents, include_params=False):
'''Save network definition, inference/training execution
configurations etc.
Args:
filename (str): Filename to store information. The file
extension is used to determine the saving file format.
``.nnp``: (Recommended) Creates a zip archive with nntxt (network
definition etc.) and h5 (parameters).
``.nntxt``: Protobuf in text format.
``.protobuf``: Protobuf in binary format (unsafe in terms of
backward compatibility).
contents (dict): Information to store.
include_params (bool): Includes parameter into single file. This is
ignored when the extension of filename is nnp.
Example:
The current supported fields as contents are ``networks`` and
``executors``. The following example creates a two inputs and two
outputs MLP, and save the network structure and the initialized
parameters.
from collections import namedtuple
import numpy as np
import param
from param.parameterized import bothmethod
from .core.dimension import OrderedDict
from .core.element import Element, Layout
from .core.options import CallbackError, Store
from .core.overlay import NdOverlay, Overlay
from .core.spaces import GridSpace
from .streams import SelectionExpr, PlotReset, Stream
from .util import DynamicMap
from .util.transform import dim
class _Cmap(Stream):
cmap = param.Parameter(default=None, allow_None=True)
_Exprs = Stream.define('Exprs', exprs=[])
_Styles = Stream.define('Styles', colors=[], alpha=1.)
_RegionElement = Stream.define("RegionElement", region_element=None)
_SelectionStreams = namedtuple(
'SelectionStreams',
'style_stream exprs_stream cmap_streams '
)
class _base_link_selections(param.ParameterizedFunction):
"""
Baseclass for linked selection functions.
Subclasses override the _build_selection_streams class method to construct
a _SelectionStreams namedtuple instance that includes the required streams
for implementing linked selections.
Subclasses also override the _expr_stream_updated method. This allows
subclasses to control whether new selections override prior selections or
whether they are combined with prior selections
"""
@bothmethod
def instance(self_or_cls, **params):
inst = super(_base_link_selections, self_or_cls).instance(**params)
# Init private properties
inst._selection_expr_streams = []
inst._reset_streams = []
# Init selection streams
inst._selection_streams = self_or_cls._build_selection_streams(inst)
# Init dict of region streams
inst._region_streams = {}
return inst
def _register(self, hvobj):
"""
Register an Element or DynamicMap that may be capable of generating
selection expressions in response to user interaction events
"""
# Create stream that produces element that displays region of selection
if isinstance(hvobj, DynamicMap):
eltype = hvobj.type
else:
eltype = type(hvobj)
if getattr(eltype, "_selection_streams", ()):
self._region_streams[hvobj] = _RegionElement()
# Create SelectionExpr stream
expr_stream = SelectionExpr(source=hvobj, index_cols=self.index_cols)
expr_stream.add_subscriber(
lambda **kwargs: self._expr_stream_updated(hvobj, **kwargs)
)
self._selection_expr_streams.append(expr_stream)
# Create PlotReset stream
reset_stream = PlotReset(source=hvobj)
reset_stream.add_subscriber(
lambda **kwargs: setattr(self, 'selection_expr', None)
)
self._reset_streams.append(reset_stream)
def __call__(self, hvobj, **kwargs):
# Apply kwargs as params
self.param.set_param(**kwargs)
if Store.current_backend not in Store.renderers:
raise RuntimeError("Cannot peform link_selections operation "
"since the selected backend %r is not "
"loaded. Load the plotting extension with "
"hv.extension or import the plotting "
"backend explicitly." % Store.current_backend)
# Perform transform
return self._selection_transform(hvobj.clone())
def _selection_transform(self, hvobj, operations=()):
"""
Transform an input HoloViews object into a dynamic object with linked
selections enabled.
"""
from .plotting.util import initialize_dynamic
if isinstance(hvobj, DynamicMap):
callback = hvobj.callback
if len(callback.inputs) > 1:
return Overlay([
self._selection_transform(el) for el in callback.inputs
]).collate()
initialize_dynamic(hvobj)
if issubclass(hvobj.type, Element):
self._register(hvobj)
chart = Store.registry[Store.current_backend][hvobj.type]
return chart.selection_display(hvobj).build_selection(
self._selection_streams, hvobj, operations,
self._region_streams.get(hvobj, None),
)
else:
# This is a DynamicMap that we don't know how to recurse into.
return hvobj
elif isinstance(hvobj, Element):
# Register hvobj to receive selection expression callbacks
chart = Store.registry[Store.current_backend][type(hvobj)]
if getattr(chart, 'selection_display', None):
element = hvobj.clone(link=False)
self._register(element)
return chart.selection_display(element).build_selection(
self._selection_streams, element, operations,
self._region_streams.get(element, None),
)
return hvobj
elif isinstance(hvobj, (Layout, Overlay, NdOverlay, GridSpace)):
data = OrderedDict([(k, self._selection_transform(v, operations))
for k, v in hvobj.items()])
if isinstance(hvobj, NdOverlay):
def compose(*args, **kwargs):
new = []
for k, v in data.items():
for i, el in enumerate(v[()]):
if i == len(new):
new.append([])
new[i].append((k, el))
return Overlay([hvobj.clone(n) for n in new])
new_hvobj = DynamicMap(compose)
new_hvobj.callback.inputs[:] = list(data.values())
else:
new_hvobj = hvobj.clone(data)
if hasattr(new_hvobj, 'collate'):
new_hvobj = new_hvobj.collate()
return new_hvobj
else:
# Unsupported object
return hvobj
@classmethod
def _build_selection_streams(cls, inst):
"""
Subclasses should override this method to return a _SelectionStreams
instance
"""
raise NotImplementedError()
def _expr_stream_updated(self, hvobj, selection_expr, bbox, region_element):
"""
Called when one of the registered HoloViews objects produces a new
selection expression. Subclasses should override this method, and
they should use the input expression to update the `exprs_stream`
property of the _SelectionStreams instance that was produced by
the _build_selection_streams.
Subclasses have the flexibility to control whether the new selection
express overrides previous selections, or whether it is combined with
previous selections.
"""
raise NotImplementedError()
class link_selections(_base_link_selections):
"""
Operation which automatically links selections between elements
in the supplied HoloViews object. Can be applied as a one-off call,
or instantiated and reused to apply the same linked selections
across multiple objects.
"""
cross_filter_mode = param.Selector(
['overwrite', 'intersect'], default='intersect', doc="""
Determines how to combine selections across different
elements.""")
index_cols = param.List(default=None, doc="""
If provided, selection switches to index mode where all queries
are expressed solely in terms of discrete values along the
index_cols. All Elements given to link_selections must define the
index_cols, either as explicit dimensions or by sharing an
underlying Dataset that defines them.""")
selection_expr = param.Parameter(default=None, doc="""
dim expression of the current selection or None to indicate
that everything is selected.""")
selected_color = param.Color(default=None, allow_None=True, doc="""
Color of selected data, or None to use the original color of
each element.""")
selection_mode = param.Selector(
['overwrite', 'intersect', 'union', 'inverse'], default='overwrite', doc="""
Determines how to combine successive selections on the same
element.""")
show_regions = param.Boolean(default=True, doc="""
Whether to highlight the selected regions.""")
unselected_alpha = param.Magnitude(default=0.1, doc="""
Alpha of unselected data.""")
unselected_color = param.Color(default=None, doc="""
Color of unselected data.""")
@bothmethod
def instance(self_or_cls, **params):
inst = super(link_selections, self_or_cls).instance(**params)
# Initialize private properties
inst._obj_selections = {}
inst._obj_regions = {}
inst._reset_regions = True
return inst
@classmethod
def _build_selection_streams(cls, inst):
# Colors stream
style_stream = _Styles(
colors=[inst.unselected_color, inst.selected_color],
alpha=inst.unselected_alpha
)
# Cmap streams
cmap_streams = [
_Cmap(cmap=inst.unselected_cmap),
_Cmap(cmap=inst.selected_cmap),
]
def update_colors(*_):
colors = [inst.unselected_color, inst.selected_color]
style_stream.event(colors=colors, alpha=inst.unselected_alpha)
cmap_streams[0].event(cmap=inst.unselected_cmap)
if cmap_streams[1] is not None:
cmap_streams[1].event(cmap=inst.selected_cmap)
inst.param.watch(update_colors,['unselected_color', 'selected_color', 'unselected_alpha'])
# Exprs stream
exprs_stream = _Exprs(exprs=[True, None])
def update_exprs(*_):
exprs_stream.event(exprs=[True, inst.selection_expr])
# Reset regions
if inst._reset_regions:
for k, v in inst._region_streams.items():
inst._region_streams[k].event(region_element=None)
inst._obj_selections.clear()
inst._obj_regions.clear()
inst.param.watch(update_exprs, ['selection_expr'])
return _SelectionStreams(
style_stream=style_stream,
exprs_stream=exprs_stream,
cmap_streams=cmap_streams,
)
@property
def unselected_cmap(self):
"""
The datashader colormap for unselected data
"""
if self.unselected_color is None:
return None
return _color_to_cmap(self.unselected_color)
@property
def selected_cmap(self):
"""
The datashader colormap for selected data
"""
return None if self.selected_color is None else _color_to_cmap(self.selected_color)
def _expr_stream_updated(self, hvobj, selection_expr, bbox, region_element):
if selection_expr:
if self.cross_filter_mode == "overwrite":
# clear other regions and selections
for k, v in self._region_streams.items():
if k is not hvobj:
self._region_streams[k].event(region_element=None)
self._obj_regions.pop(k, None)
self._obj_selections.pop(k, None)
# Update selection expression
if hvobj not in self._obj_selections or self.selection_mode == "overwrite":
if self.selection_mode == "inverse":
self._obj_selections[hvobj] = ~selection_expr
else:
self._obj_selections[hvobj] = selection_expr
else:
if self.selection_mode == "intersect":
self._obj_selections[hvobj] &= selection_expr
elif self.selection_mode == "union":
self._obj_selections[hvobj] |= selection_expr
else: # inverse
self._obj_selections[hvobj] &= ~selection_expr
# Update region
if self.show_regions:
if isinstance(hvobj, DynamicMap):
el_type = hvobj.type
else:
el_type = hvobj
region_element = el_type._merge_regions(
self._obj_regions.get(hvobj, None), region_element, self.selection_mode
)
self._obj_regions[hvobj] = region_element
else:
region_element = None
# build combined selection
selection_exprs = list(self._obj_selections.values())
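# When index_cols is set, the per-object selections are combined by intersecting
# their discrete index values; otherwise the dim expressions are simply AND-ed together.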
if self.index_cols:
if len(selection_exprs) > 1:
vals = set.intersection(*(set(expr.ops[2]['args'][0]) for expr in selection_exprs))
old = selection_exprs[0]
selection_expr = dim('new')
selection_expr.dimension = old.dimension
selection_expr.ops = list(old.ops)
selection_expr.ops[2] = dict(selection_expr.ops[2], args=(list(vals),))
else:
selection_expr = selection_exprs[0]
for expr in selection_exprs[1:]:
selection_expr = selection_expr & expr
# Set _reset_regions to False so that plot regions aren't automatically
# cleared when self.selection_expr is set.
self._reset_regions = False
self.selection_expr = selection_expr
self._reset_regions = True
# update this region stream
if self._region_streams.get(hvobj, None) is not None:
self._region_streams[hvobj].event(region_element=region_element)
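# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the typical link_selections call pattern; the element
# names and random data below are placeholders, and a plotting backend is
# assumed to have been loaded already (e.g. via hv.extension('bokeh')).
def _link_selections_usage_sketch():
    import holoviews as hv
    points = hv.Points(np.random.randn(100, 2), ['x', 'y'])
    hist = hv.operation.histogram(points, dimension='x')
    # Selections made on either plot are mirrored onto the other.
    return link_selections(points + hist)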
class SelectionDisplay(object):
"""
Base class for selection display classes. Selection display classes are
responsible for transforming an element (or DynamicMap that produces an
element) into a HoloViews object that represents the current selection
state.
"""
def __call__(self, element):
return self
def build_selection(self, selection_streams, hvobj, operations, region_stream=None):
raise NotImplementedError()
@staticmethod
def _select(element, selection_expr, cache={}):
from .element import Curve, Spread
from .util.transform import dim
if isinstance(selection_expr, dim):
dataset = element.dataset
mask = None
if dataset._plot_id in cache:
ds_cache = cache[dataset._plot_id]
if selection_expr in ds_cache:
mask = ds_cache[selection_expr]
else:
ds_cache.clear()
else:
ds_cache = cache[dataset._plot_id] = {}
try:
if dataset.interface.gridded:
if mask is None:
mask = selection_expr.apply(dataset, expanded=True, flat=False, strict=False)
selection = dataset.clone(dataset.interface.mask(dataset, ~mask))
elif dataset.interface.multi:
if mask is None:
mask = selection_expr.apply(dataset, expanded=False, flat=False, strict=False)
selection = dataset.iloc[mask]
elif isinstance(element, (Curve, Spread)) and hasattr(dataset.interface, 'mask'):
if mask is None:
mask = selection_expr.apply(dataset, compute=False, strict=False)
selection = dataset.clone(dataset.interface.mask(dataset, ~mask))
else:
if mask is None:
mask = selection_expr.apply(dataset, compute=False, keep_index=True, strict=False)
selection = dataset.select(selection_mask=mask)
except KeyError as e:
key_error = str(e).replace('"', '').replace('.', '')
raise CallbackError("linked_selection aborted because it could not "
"display selection for all elements: %s on '%r'."
% (key_error, element))
except Exception as e:
raise CallbackError("linked_selection aborted because it could not "
"display selection for all elements: %s." % e)
ds_cache[selection_expr] = mask
else:
selection = element
return selection
class NoOpSelectionDisplay(SelectionDisplay):
"""
Selection display class
<filename>src/Dijkstra_rigid_Vis.py
import numpy as np
import cv2
try:
radius = int(input('Enter radius of the robot: '))
if radius < 0:
print("Invalid radius, setting radius to 0")
radius = 0
clearance = int(input('Enter clearance: '))
if clearance < 0:
print("Invalid clearance, setting clearance to 0")
clearance = 0
except:
print("Error: Invalid Input. Exiting program")
exit(3)
cl = radius + clearance
# Map creation with edges as '1' in order to provide a void border of the map
obs_map = np.zeros((302, 402), dtype=int)
obs_map[0, :] = 1
obs_map[301, :] = 1
obs_map[:, 0] = 1
obs_map[:, 401] = 1
class Queue:
# Creating a class to convert a list into a queue
def __init__(self):
self.queue = []
def add(self, node):
self.queue.append(node)
def pop(self):
ind = self.queue.index(min(self.queue))
node = self.queue.pop(ind)
return node
def __len__(self):
return len(self.queue)
# Creating a class to represent a search node: each node stores a map position,
# its parent node, the action taken to reach it and the accumulated path cost.
class Node:
# Defining the __init__ function
def __init__(self, data, parent, act, cost):
self.data = data
self.parent = parent
self.act = act
self.id = self.get_id()
self.cost = cost
def __eq__(self, other):
if hasattr(other, 'cost'):
return self.cost == other.cost
else:
raise NotImplementedError('Not supported between given types')
def __ne__(self, other):
if hasattr(other, 'cost'):
return self.cost != other.cost
else:
raise NotImplementedError('Not supported between given types')
def __lt__(self, other):
if hasattr(other, 'cost'):
return self.cost < other.cost
else:
raise NotImplementedError('Not supported between given types')
def __gt__(self, other):
if hasattr(other, 'cost'):
return self.cost > other.cost
else:
raise NotImplementedError('Not supported between given types')
def __le__(self, other):
if hasattr(other, 'cost'):
return self.cost <= other.cost
else:
raise NotImplementedError('Not supported between given types')
def __ge__(self, other):
if hasattr(other, 'cost'):
return self.cost >= other.cost
else:
raise NotImplementedError('Not supported between given types')
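# The rich comparisons above order nodes by accumulated path cost, which is
# what lets Queue.pop() retrieve the cheapest node via min().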
# Defining a function to generate a unique id string from the node's position
def get_id(self):
_id = np.ravel(self.data).tolist()
_id = [str(item) for item in _id]
_id = "-".join(_id)
self.id = _id
return self.id
# Defining the __repr__ function
def __repr__(self):
return str(self.data)
# Creating a function to define the circle obstacle's area on the map
def getCircleObstacle(i, j):
global cl
cond = ((j - 90) ** 2) + ((i - 70) ** 2) <= ((35 + cl) ** 2)
return cond
# Creating a function to define the C shape obstacle's area on the map
def getCShapeObstacle(i, j):
global cl
cond1 = i <= 270 - cl
cond2 = i <= 280 + cl
cond3 = j >= 200 - cl
cond4 = j >= 210 + cl
cond5 = i >= 240 + cl
cond6 = i >= 230 - cl
cond7 = j <= 230 + cl
ret_val = ((cond2 and cond3 and cond6 and cond7) and not (cond1 and cond4 and cond5 and cond7))
return ret_val
# Creating a function to define the slanted rectangle obstacle's area on the map
def getSlantedRectObstacle(i, j):
global cl
s1 = 0.7
s2 = -1.42814
x1 = np.arctan(s1)
x2 = np.arctan(s2)
d1 = np.cos(np.pi - x1)
d2 = np.cos(np.pi - x2)
a = -(cl / d1)
b = -(cl / d2)
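# a and b convert the perpendicular clearance cl into an offset of each line's
# constant term: dividing by cos(arctan(slope)) rescales the normal distance
# into the corresponding shift of the line equation's intercept.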
cond1 = (i) + (1.42814 * j) >= (176.5511 - b)
cond2 = (i) - (0.7 * j) >= (74.39 - a)
cond3 = (i) + (1.42814 * j) <= (428.06815 + b)
cond4 = (i) - (0.7 * j) <= (98.80545 + a)
ret_val = (cond1 and cond2 and cond3 and cond4)
return ret_val
# Creating a function to define the ellipse obstacle's area on the map
def getEllipseObstacle(i, j):
global cl
cond = (((j - 246) / (60 + cl)) ** 2) + (((i - 145) / (30 + cl)) ** 2) <= 1
return cond
def getBorderClearance(i, j):
global cl
cond1 = j >= 402 - cl
cond2 = j <= cl
cond3 = i >= 302 - cl
cond4 = i <= cl
ret_val = cond1 or cond2 or cond3 or cond4
return ret_val
# Creating a function to define the polygon obstacle's area on the map
# The Polygon is divided into a rectangle and two triangles
# Mark every cell that falls inside any obstacle (or the border clearance) with '1' so it becomes a void the robot cannot enter
for i in range(obs_map.shape[0]):
for j in range(obs_map.shape[1]):
if getCircleObstacle(obs_map.shape[0] - i, j) or getCShapeObstacle(obs_map.shape[0] - i,
j) or getSlantedRectObstacle(
obs_map.shape[0] - i, j) or getEllipseObstacle(obs_map.shape[0] - i, j) or getBorderClearance(
obs_map.shape[0] - i, j):
obs_map[i, j] = 1
# Move up: allowed only if the cell above is not marked '1' (obstacle/void)
def move_up(i, j):
if obs_map[i - 1, j] != 1:
return (i - 1, j)
# Move down: allowed only if the cell below is not marked '1' (obstacle/void)
def move_down(i, j):
if obs_map[i + 1, j] != 1:
return (i + 1, j)
# Move left: allowed only if the cell to the left is not marked '1' (obstacle/void)
def move_left(i, j):
if obs_map[i, j - 1] != 1:
return (i, j - 1)
# Move right: allowed only if the cell to the right is not marked '1' (obstacle/void)
def move_right(i, j):
if obs_map[i, j + 1] != 1:
return (i, j + 1)
# Move up-left: allowed only if the diagonal cell above-left is not marked '1' (obstacle/void)
def move_up_left(i, j):
if obs_map[i - 1, j - 1] != 1:
return (i - 1, j - 1)
# Move up-right: allowed only if the diagonal cell above-right is not marked '1' (obstacle/void)
def move_up_right(i, j):
if obs_map[i - 1, j + 1] != 1:
return (i - 1, j + 1)
# Move down-left: allowed only if the diagonal cell below-left is not marked '1' (obstacle/void)
def move_down_left(i, j):
if obs_map[i + 1, j - 1] != 1:
return (i + 1, j - 1)
# Move down-right: allowed only if the diagonal cell below-right is not marked '1' (obstacle/void)
def move_down_right(i, j):
if obs_map[i + 1, j + 1] != 1:
return (i + 1, j + 1)
# Defining a function to generate new legal moves as per the state
def generate_new_moves(state):
list_states = []
for func in [move_left, move_right, move_down, move_up]:
cost = state.cost + 1
dum_state = state.data
out_state = func(dum_state[0], dum_state[1])
if out_state is not None:
list_states.append((out_state, cost))
for func in [move_up_left, move_up_right, move_down_left,
move_down_right]:
cost = state.cost + 1.414
dum_state = state.data
out_state = func(dum_state[0], dum_state[1])
if out_state is not None:
list_states.append((out_state, cost))
return list_states
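# --- Hedged sketch (not part of the original script) ---
# Outline of how Queue, Node and generate_new_moves above combine into a
# Dijkstra search; names here are illustrative, and the script's real main
# loop (including its OpenCV visualisation) is not shown in this excerpt.
def _dijkstra_sketch(start, goal):
    open_list = Queue()
    open_list.add(Node(start, None, None, 0))
    best_cost = {}
    while len(open_list) > 0:
        current = open_list.pop()          # cheapest node so far
        if current.data == goal:
            return current                 # goal reached; backtrack via .parent
        if current.id in best_cost and best_cost[current.id] <= current.cost:
            continue                       # already expanded at a lower cost
        best_cost[current.id] = current.cost
        for state, cost in generate_new_moves(current):
            open_list.add(Node(state, current, None, cost))
    return None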
# Read the start and goal positions from the user and validate them against the map bounds and obstacle regions
try:
start_node_x = int(input('Enter start node x position: '))
if start_node_x < 0:
print("Invalid start node x position, setting x position to 0")
start_node_x = 0
elif start_node_x > 402:
print("Invalid start node x position, setting x position to 402")
start_node_x = 402
start_node_y = int(input('Enter start node y position: '))
if start_node_y < 0:
print("Invalid start node y position, setting y position to 0")
start_node_y = 0
elif start_node_y > 302:
print("Invalid start node y position, setting y position to 302")
start_node_y = 302
goal_node_x = int(input('Enter goal node x position: '))
if goal_node_x < 0:
self._parents[node]})
# if coming from parent and see unconditioned node, can go through children
if node not in C:
schedule.update({(child, _p) for child in self._children[node]})
return True
def local_markov_statements(self) -> Set[Tuple[Any, FrozenSet, FrozenSet]]:
"""
Return the local Markov statements of this DAG, i.e., those of the form ``i`` independent of nondescendants(i) given
the parents of ``i``.
Returns
-------
set
The set of tuples of the form (``i``, ``A``, ``C``) representing the local Markov statements of the DAG
via (``i`` independent of ``A`` given ``C``).
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (3, 2)})
>>> g.local_markov_statements()
{(1, frozenset({3}), frozenset()), (2, frozenset(), frozenset({1, 3})), (3, frozenset({1}), frozenset())}
"""
statements = set()
for node in self._nodes:
parents = self._parents[node]
nondescendants = self._nodes - {node} - self.descendants_of(node) - parents
statements.add((node, frozenset(nondescendants), frozenset(parents)))
return statements
# === CONVERSION TO OTHER GRAPHS
def moral_graph(self):
"""
Return the (undirected) moral graph of this DAG, i.e., the graph with the parents of all nodes made adjacent.
Returns
-------
UndirectedGraph:
Moral graph of this DAG.
Examples
--------
>>> from graphical_models import DAG
>>> d = DAG(arcs={(1, 3), (2, 3)})
>>> ug = d.moral_graph()
>>> ug.edges
{frozenset({1, 3}), frozenset({2, 3}), frozenset({1, 2})}
"""
warn_untested() # TODO: ADD TEST
from causaldag import UndirectedGraph
edges = {(i, j) for i, j in self._arcs} | {(p1, p2) for p1, node, p2 in self.vstructures()}
return UndirectedGraph(self._nodes, edges)
def marginal_mag(self, latent_nodes, relabel=None, new=True):
"""
Return the maximal ancestral graph (MAG) that results from marginalizing out ``latent_nodes``.
Parameters
----------
latent_nodes:
nodes to marginalize over.
relabel:
if relabel='default', relabel the nodes to have labels 0,1,...,(#nodes-1).
new:
TODO - pick whether to use new or old implementation.
Returns
-------
m:
AncestralGraph, the MAG resulting from marginalizing out `latent_nodes`.
Examples
--------
>>> from graphical_models import DAG
>>> d = DAG(arcs={(1, 3), (1, 2)})
>>> mag = d.marginal_mag(latent_nodes={1})
>>> mag
Directed edges: set(), Bidirected edges: {frozenset({2, 3})}, Undirected edges: set()
>>> mag = d.marginal_mag(latent_nodes={1}, relabel="default")
>>> mag
Directed edges: set(), Bidirected edges: {frozenset({0, 1})}, Undirected edges: set()
"""
warn_untested() # TODO: ADD TEST
from graphical_models import AncestralGraph
latent_nodes = core_utils.to_set(latent_nodes)
if not new:
new_nodes = self._nodes - latent_nodes
directed = set()
bidirected = set()
for i, j in itr.combinations(self._nodes - latent_nodes, r=2):
adjacent = all(not self.dsep(i, j, S) for S in core_utils.powerset(self._nodes - {i, j} - latent_nodes))
if adjacent:
if self.is_ancestor_of(i, j):
directed.add((i, j))
elif self.is_ancestor_of(j, i):
directed.add((j, i))
else:
bidirected.add((i, j))
if relabel is not None:
t = self.topological_sort()
t_new = [node for node in t if node not in latent_nodes]
node2new_label = dict(map(reversed, enumerate(t_new)))
new_nodes = {node2new_label[node] for node in new_nodes}
directed = {(node2new_label[i], node2new_label[j]) for i, j in directed}
bidirected = {(node2new_label[i], node2new_label[j]) for i, j in bidirected}
return AncestralGraph(nodes=new_nodes, directed=directed, bidirected=bidirected)
else:
# ag = AncestralGraph(nodes=self._nodes, directed=self._arcs)
# curr_directed = ag.directed
# curr_bidirected = ag.bidirected
#
# while True:
# for node in latent_nodes:
# parents = ag._parents[node]
# children = ag._children[node]
# spouses = ag._spouses[node]
# for j, i in itr.product(parents, children):
# ag._add_directed(j, i, ignore_error=True)
# for i, j in itr.combinations(children, 2):
# ag._add_bidirected(i, j, ignore_error=True)
# for i, j in itr.product(children, spouses):
# ag._add_bidirected(i, j, ignore_error=True)
#
# last_directed = curr_directed
# last_bidirected = curr_bidirected
# curr_directed = ag.directed
# curr_bidirected = ag.bidirected
# if curr_directed == last_directed and curr_bidirected == last_bidirected:
# break
# for node in latent_nodes:
# ag.remove_node(node, ignore_error=True)
ag = AncestralGraph(nodes=self._nodes, directed=self._arcs)
ancestor_dict = ag.ancestor_dict()
for i, j in itr.combinations(self._nodes - latent_nodes, 2):
S = (ancestor_dict[i] | ancestor_dict[j]) - {i, j} - latent_nodes
if not ag.has_any_edge(i, j) and not ag.msep(i, j, S):
if i in ancestor_dict[j]:
ag._add_directed(i, j)
elif j in ancestor_dict[i]:
ag._add_directed(j, i)
else:
ag._add_bidirected(i, j)
for node in latent_nodes:
ag.remove_node(node, ignore_error=True)
if relabel is not None:
if relabel == 'default':
relabel = {node: ix for ix, node in enumerate(sorted(self._nodes - set(latent_nodes)))}
new_nodes = {relabel[node] for node in self._nodes - set(latent_nodes)}
directed = {(relabel[i], relabel[j]) for i, j in ag.directed}
bidirected = {(relabel[i], relabel[j]) for i, j in ag.bidirected}
return AncestralGraph(new_nodes, directed=directed, bidirected=bidirected)
return ag
def cpdag(self):
"""
Return the completed partially directed acyclic graph (CPDAG, aka essential graph) that represents the
Markov equivalence class of this DAG.
Return
------
causaldag.PDAG:
CPDAG representing the MEC of this DAG.
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 4), (3, 4)})
>>> cpdag = g.cpdag()
>>> cpdag.edges
{frozenset({1, 2})}
>>> cpdag.arcs
{(2, 4), (3, 4)}
"""
from graphical_models import PDAG
pdag = PDAG(nodes=self._nodes, arcs=self._arcs, known_arcs=self.arcs_in_vstructures())
pdag.remove_unprotected_orientations()
return pdag
def cpdag_new(self, new=False):
from graphical_models import PDAG
vstruct = self.arcs_in_vstructures()
pdag = PDAG(nodes=self._nodes, arcs=vstruct, edges=self._arcs - vstruct)
if new:
pdag.to_complete_pdag_new()
else:
pdag.to_complete_pdag()
return pdag
def interventional_cpdag(self, interventions: List[set], cpdag=None):
"""
Return the interventional essential graph (aka CPDAG) associated with this DAG.
Parameters
----------
interventions:
A list of the intervention targets.
cpdag:
The original (non-interventional) CPDAG of the graph. Faster when provided.
Return
------
causaldag.PDAG:
Interventional CPDAG representing the I-MEC of this DAG.
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 4), (3, 4)})
>>> cpdag = g.cpdag()
>>> icpdag = g.interventional_cpdag([{1}], cpdag=cpdag)
>>> icpdag.arcs
{(1, 2), (2, 4), (3, 4)}
"""
warn_untested() # TODO: ADD TEST
from graphical_models import PDAG
if cpdag is None:
raise ValueError('Need the CPDAG')
# dag_cut = self.copy()
# known_arcs = set()
# for node in intervened_nodes:
# for i, j in dag_cut.incoming_arcs(node):
# dag_cut.remove_arc(i, j)
# known_arcs.update(self.outgoing_arcs(node))
# known_arcs.update(dag_cut.vstructs())
# pdag = PDAG(dag_cut._nodes, dag_cut._arcs, known_arcs=known_arcs)
else:
cut_edges = set()
for iv_nodes in interventions:
cut_edges.update({(i, j) for i, j in self._arcs if len({i, j} & set(iv_nodes)) == 1})
known_arcs = cut_edges | cpdag._known_arcs
pdag = PDAG(self._nodes, self._arcs, known_arcs=known_arcs)
pdag.remove_unprotected_orientations()
return pdag
# === CHICKERING SEQUENCE
def _is_resolved_sink(self, other, node, res_sinks):
no_children = not (self._children[node] - res_sinks)
no_children_other = not (other._children[node] - res_sinks)
same_parents = self._parents[node] == other._parents[node]
return no_children and no_children_other and same_parents
def resolved_sinks(self, other) -> set:
"""
Return the nodes in this graph which are "resolved sinks" with respect to the graph ``other``.
A "resolved sink" is a node which has the same parents in both graphs, and no children which are
not themselves resolved sinks.
Parameters
----------
other
TODO
Examples
--------
>>> from graphical_models import DAG
>>> d1 = DAG(arcs={(1, 0), (1, 2), (2, 0)})
>>> d2 = DAG(arcs={(2, 0), (2, 1), (1, 0)})
>>> res_sinks = d1.resolved_sinks(d2)
>>> res_sinks
{0}
"""
# warn_untested() # TODO: ADD TEST
res_sinks = set()
while True:
new_resolved = {node for node in self._nodes - res_sinks if self._is_resolved_sink(other, node, res_sinks)}
res_sinks.update(new_resolved)
if not new_resolved:
break
return res_sinks
def chickering_sequence(self, imap, verbose=False):
"""
Return a *Chickering sequence* from this DAG to an I-MAP ``imap``.
A Chickering sequence from DAG ``D1`` to a DAG ``D2`` is a sequence of DAGs starting at ``D1`` and ending at
``D2``, with consecutive DAGs differing by a single edge reversal or edge deletion, such that each DAG is an
IMAP of ``D1``.
See Chickering, D. M. "Optimal structure identification with greedy search." (2002) for more details.
Parameters
----------
imap: DAG
The I-MAP of this DAG at which the Chickering sequence will end.
Examples
--------
>>> from graphical_models import DAG
>>> d1 = DAG(arcs={(0, 1), (1, 2)})
>>> d2 = DAG(arcs={(2, 0), (2, 1), (1, 0)})
>>> sequence, moves = d1.chickering_sequence(d2)
>>> sequence[1].arcs
{(1, 0), (1, 2)}
>>> sequence[2].arcs
{(1, 0), (1, 2), (2, 0)}
>>> moves
[
{'sink': 0, 'move': 6, 'd': 2},
{'sink': 0, 'move': 4},
{'sink': 1, 'move': 6, 'd': 2}
]
"""
# warn_untested() # TODO: ADD TEST
curr_graph = self
ch_seq = []
moves = []
last_sink = None
while curr_graph != imap:
ch_seq.append(curr_graph)
curr_graph, move = curr_graph.apply_edge_operation(imap, seed_sink=last_sink, verbose=verbose)
moves.append(move)
ch_seq.append(imap)
return ch_seq, moves
def apply_edge_operation(self, imap, seed_sink=None, verbose=False):
"""
Identify an edge operation (covered edge reversal or edge addition) which decreases the Chickering distance
from this DAG to ``imap``.
See