docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function.
|
def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
    """Return a float32 TensorFluent with the element-wise minimum of `x` and `y`."""
    minimum = tf.minimum
    return cls._binary_op(x, y, minimum, tf.float32)
| 727,773
|
Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape.
|
def if_then_else(cls,
                 condition: 'TensorFluent',
                 true_case: 'TensorFluent',
                 false_case: 'TensorFluent') -> 'TensorFluent':
    """Return a TensorFluent for the if-then-else control statement.

    Implemented by arithmetic masking rather than tf.where:
    (condition == True) * true_case + (condition == False) * false_case.
    Exactly one of the two comparison masks is 1 per element, so the sum
    selects the matching case.

    Args:
        condition: Boolean fluent for the if condition.
        true_case: Fluent returned where the condition holds.
        false_case: Fluent returned where the condition does not hold.

    Returns:
        A TensorFluent wrapping the if-then-else result.
    """
    # Constant boolean fluents to compare the condition against.
    true = TensorFluent.constant(True, tf.bool)
    false = TensorFluent.constant(False, tf.bool)
    # `==` and `*` go through _binary_op, which promotes operands to float32.
    ite = (condition == true) * true_case + (condition == false) * false_case
    # Restore a boolean result when both branches were boolean, since the
    # masking arithmetic above produced a float fluent.
    if true_case.dtype == tf.bool and false_case.dtype == tf.bool:
        ite = ite.cast(tf.bool)
    return ite
| 727,774
|
Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output.
|
def _binary_op(cls,
               x: 'TensorFluent',
               y: 'TensorFluent',
               op: Callable[[tf.Tensor, tf.Tensor], tf.Tensor],
               dtype: tf.DType) -> 'TensorFluent':
    """Return a TensorFluent for the binary `op` applied to fluents `x` and `y`.

    Aligns the two fluents before applying `op`: first their scopes are
    broadcast to a common variable ordering (transposing each operand as
    needed), then their shapes are broadcast (reshaping as needed), then
    both are cast to `dtype`.

    Args:
        x: The first operand.
        y: The second operand.
        op: The binary operator.
        dtype: The output's data type.

    Returns:
        A TensorFluent wrapping the binary operator's output.
    """
    # scope: align the variable orderings of both operands.
    s1 = x.scope.as_list()
    s2 = y.scope.as_list()
    scope, perm1, perm2 = TensorFluentScope.broadcast(s1, s2)
    # Shift fluent-axis permutations by one to leave the leading batch
    # dimension (axis 0) in place.
    if x.batch and perm1 != []:
        perm1 = [0] + [p+1 for p in perm1]
    if y.batch and perm2 != []:
        perm2 = [0] + [p+1 for p in perm2]
    x = x.transpose(perm1)
    y = y.transpose(perm2)
    # shape: broadcast fluent shapes; a None reshape means "already compatible".
    reshape1, reshape2 = TensorFluentShape.broadcast(x.shape, y.shape)
    if reshape1 is not None:
        x = x.reshape(reshape1)
    if reshape2 is not None:
        y = y.reshape(reshape2)
    # dtype: both operands must match the output dtype before applying op.
    x = x.cast(dtype)
    y = y.cast(dtype)
    # operation
    t = op(x.tensor, y.tensor)
    # batch: the result is batched if either operand is.
    batch = x.batch or y.batch
    return TensorFluent(t, scope, batch=batch)
| 727,775
|
Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output.
|
def _unary_op(cls,
              x: 'TensorFluent',
              op: Callable[[tf.Tensor], tf.Tensor],
              dtype: tf.DType) -> 'TensorFluent':
    """Apply the unary `op` to fluent `x` after casting it to `dtype`.

    Scope and batch mode are carried over unchanged from the input.
    """
    operand = x.cast(dtype)
    result = op(operand.tensor)
    return TensorFluent(result, operand.scope.as_list(), batch=operand.batch)
| 727,776
|
Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
|
def _aggregation_op(cls,
                    op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
                    x: 'TensorFluent',
                    vars_list: List[str]) -> 'TensorFluent':
    """Apply the aggregation `op` to fluent `x` over the axes of `vars_list`.

    The aggregated variables are removed from the result's scope.
    """
    axis = cls._varslist2axis(x, vars_list)
    tensor = op(x.tensor, axis)
    remaining_scope = [var for var in x.scope.as_list() if var not in vars_list]
    return TensorFluent(tensor, remaining_scope, batch=x.batch)
| 727,777
|
Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis.
|
def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
    """Map `vars_list` to the axis indices of `fluent`'s tensor.

    Variables not in the fluent's scope are skipped; in batch mode every
    axis is shifted by one to skip the leading batch dimension.
    """
    offset = 1 if fluent.batch else 0
    in_scope = fluent.scope.as_list()
    return [fluent.scope.index(var) + offset
            for var in vars_list if var in in_scope]
| 727,778
|
Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation.
|
def cast(self, dtype: tf.DType) -> 'TensorFluent':
    """Return this fluent cast to `dtype` (self if already that dtype)."""
    if dtype == self.dtype:
        return self
    converted = tf.cast(self.tensor, dtype)
    return TensorFluent(converted, self.scope.as_list(), batch=self.batch)
| 727,779
|
Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation.
|
def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
    """Return a fluent with this fluent's tensor reshaped to `shape`."""
    reshaped = tf.reshape(self.tensor, shape)
    return TensorFluent(reshaped, self.scope.as_list(), batch=self.batch)
| 727,780
|
Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation.
|
def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent':
    """Return a fluent with this fluent's tensor transposed by `permutation`.

    Args:
        permutation: The axis permutation; None reverses all axes
            (tf.transpose default), and [] is a no-op.

    Returns:
        A TensorFluent wrapping the transpose operation.
    """
    if permutation == []:
        # Empty permutation: nothing to do.
        return self
    # Fixed: the original guarded `tf.transpose` with `if permutation != []`
    # again here, which is always true after the early return above.
    t = tf.transpose(self.tensor, permutation)
    return TensorFluent(t, self.scope.as_list(), batch=self.batch)
| 727,781
|
Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function.
|
def sum(self, vars_list: List[str]) -> 'TensorFluent':
    """Aggregate over `vars_list` with tf.reduce_sum (bools are cast to float32 first)."""
    operand = self.cast(tf.float32) if self.dtype == tf.bool else self
    return self._aggregation_op(tf.reduce_sum, operand, vars_list)
| 727,782
|
Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function.
|
def avg(self, vars_list: List[str]) -> 'TensorFluent':
    """Aggregate over `vars_list` with tf.reduce_mean (bools are cast to float32 first)."""
    operand = self.cast(tf.float32) if self.dtype == tf.bool else self
    return self._aggregation_op(tf.reduce_mean, operand, vars_list)
| 727,783
|
Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
|
def prod(self, vars_list: List[str]) -> 'TensorFluent':
    """Aggregate over `vars_list` with tf.reduce_prod (bools are cast to float32 first)."""
    operand = self.cast(tf.float32) if self.dtype == tf.bool else self
    return self._aggregation_op(tf.reduce_prod, operand, vars_list)
| 727,784
|
Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function.
|
def maximum(self, vars_list: List[str]) -> 'TensorFluent':
    """Aggregate over `vars_list` with tf.reduce_max."""
    reduce_op = tf.reduce_max
    return self._aggregation_op(reduce_op, self, vars_list)
| 727,785
|
Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function.
|
def minimum(self, vars_list: List[str]) -> 'TensorFluent':
    """Aggregate over `vars_list` with tf.reduce_min."""
    reduce_op = tf.reduce_min
    return self._aggregation_op(reduce_op, self, vars_list)
| 727,786
|
Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function.
|
def forall(self, vars_list: List[str]) -> 'TensorFluent':
    """Logical conjunction over `vars_list` via tf.reduce_all."""
    reduce_op = tf.reduce_all
    return self._aggregation_op(reduce_op, self, vars_list)
| 727,787
|
Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
|
def exists(self, vars_list: List[str]) -> 'TensorFluent':
    """Logical disjunction over `vars_list` via tf.reduce_any."""
    reduce_op = tf.reduce_any
    return self._aggregation_op(reduce_op, self, vars_list)
| 727,788
|
Returns a TensorFluent for the addition arithmetic operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __add__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise addition of two fluents (float32 result)."""
    return self._binary_op(self, other, tf.add, tf.float32)
| 727,789
|
Returns a TensorFluent for the subtraction arithmetic operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __sub__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise subtraction of two fluents (float32 result)."""
    return self._binary_op(self, other, tf.subtract, tf.float32)
| 727,790
|
Returns a TensorFluent for the multiplication arithmetic operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __mul__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise multiplication of two fluents (float32 result)."""
    return self._binary_op(self, other, tf.multiply, tf.float32)
| 727,791
|
Returns a TensorFluent for the division arithmetic operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __truediv__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise division of two fluents (float32 result)."""
    return self._binary_op(self, other, tf.divide, tf.float32)
| 727,792
|
Returns a TensorFluent for the and logical operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __and__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise logical AND of two fluents (bool result)."""
    return self._binary_op(self, other, tf.logical_and, tf.bool)
| 727,793
|
Returns a TensorFluent for the or logical operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __or__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise logical OR of two fluents (bool result)."""
    return self._binary_op(self, other, tf.logical_or, tf.bool)
| 727,794
|
Returns a TensorFluent for the xor logical operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output.
|
def __xor__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise logical XOR of two fluents (bool result)."""
    return self._binary_op(self, other, tf.logical_xor, tf.bool)
| 727,795
|
Returns a TensorFluent for the less-than-or-equal relational operator.
Args:
self: The first operand.
other: The second operand.
|
def __le__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise <= comparison; operands are compared as float32."""
    return self._binary_op(self, other, tf.less_equal, tf.float32)
| 727,796
|
Returns a TensorFluent for the less-than relational operator.
Args:
self: The first operand.
other: The second operand.
|
def __lt__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise < comparison; operands are compared as float32."""
    return self._binary_op(self, other, tf.less, tf.float32)
| 727,797
|
Returns a TensorFluent for the greater-than-or-equal relational operator.
Args:
self: The first operand.
other: The second operand.
|
def __ge__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise >= comparison; operands are compared as float32."""
    return self._binary_op(self, other, tf.greater_equal, tf.float32)
| 727,798
|
Returns a TensorFluent for the greater-than relational operator.
Args:
self: The first operand.
other: The second operand.
|
def __gt__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise > comparison; operands are compared as float32."""
    return self._binary_op(self, other, tf.greater, tf.float32)
| 727,799
|
Returns a TensorFluent for the equal relational operator.
Args:
self: The first operand.
other: The second operand.
|
def __eq__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise equality; operands are compared as float32.

    NOTE(review): returns a TensorFluent, not a bool, so instances are
    not usable in hash-based containers — confirm this is intentional.
    """
    return self._binary_op(self, other, tf.equal, tf.float32)
| 727,800
|
Returns a TensorFluent for the not-equal relational operator.
Args:
self: The first operand.
other: The second operand.
|
def __ne__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Element-wise inequality; operands are compared as float32."""
    return self._binary_op(self, other, tf.not_equal, tf.float32)
| 727,801
|
Helps object methods handle MatrixRequestError.
Args:
method(function): Object method to be wrapped
Method's object must have _handle_request_exception method that deals with
specific status codes and errcodes.
|
def intent(method):
    """Decorator that lets object methods recover from MatrixRequestError.

    On a MatrixError wrapping a MatrixRequestError, the object's
    _handle_request_exception is invoked (it deals with specific status
    codes/errcodes) and the method is retried once. Other MatrixErrors are
    re-raised unchanged.

    Args:
        method (function): Object method to be wrapped. The method's object
            must provide a _handle_request_exception method.
    """
    from functools import wraps

    # Fixed: preserve the wrapped method's name/docstring with functools.wraps.
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except exceptions.MatrixError as e:
            if isinstance(e.original_exception,
                          matrix_client.errors.MatrixRequestError):
                self._handle_request_exception(e)
                # Retry once; may still raise for other reasons (not handled).
                return method(self, *args, **kwargs)
            # Fixed: bare `raise` keeps the original traceback intact
            # (the original `raise e` re-raised from this frame).
            raise
    return wrapper
| 727,941
|
Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
|
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the zip operator to a set of variables.

    Uses the python zip semantics to align the nth variable of each list.
    A non-list input is treated as a single-element list.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from zip iterator")
    items = variables if isinstance(variables, list) else [variables]
    for item in items:
        yield list(variable_matrix(item, parent, "zip"))
| 728,004
|
Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
|
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the product operator to a set of variables.

    Generates all possible combinations (itertools.product semantics);
    this is the default iterator, exposed here for explicit use.

    Args:
        variables: The variables object (must be a mapping, not a list)
        parent: Unused
    """
    logger.debug("Yielding from product iterator")
    if not isinstance(variables, list):
        yield list(variable_matrix(variables, parent, "product"))
        return
    raise ValueError(
        f"Product only takes mappings of values, got {variables} of type {type(variables)}"
    )
| 728,005
|
This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
|
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Append each element of a list of variables into a single value list.

    All values generated for each element are flattened into one list via
    itertools.chain — useful for specifying multiple simulation types with
    different parameters.

    Args:
        variables: The variables object (must be a list)
        parent: Unused
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Flatten the per-item matrices into one combined list of values.
    matrices = (variable_matrix(item, parent, "product") for item in variables)
    yield list(chain.from_iterable(matrices))
| 728,006
|
Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
|
def iterator_arange(variables: "VarType", parent: str) -> "Iterable[VarMatrix]":
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: A number (used as the stop value) or a dict of arange
            keyword arguments; `stop` is required in the dict form.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: If the dict form lacks `stop`, or the input is neither
            a number nor a dict.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # Fixed: membership test instead of truthiness, so stop=0 is a
        # valid (empty-range) bound rather than a spurious error.
        if "stop" not in variables:
            raise ValueError("Stop is a required keyword for the arange iterator.")
        # NOTE(review): bare `arange` here vs np.arange above — presumably
        # the same function imported directly; confirm at module top.
        yield [{parent: i} for i in arange(**variables)]
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
| 728,008
|
Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
|
def iterator_cycle(variables: "VarType", parent: str) -> "Iterable[VarMatrix]":
    """Cycle through a list of values a specified number of times.

    Args:
        variables: Dict of variables; must contain a `times` key giving the
            repeat count, the rest is expanded with the product iterator.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: If `variables` is not a dict or lacks the `times` key.
    """
    if not isinstance(variables, dict):
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
    # Fixed: membership test instead of truthiness so times=0 is accepted.
    if "times" not in variables:
        raise ValueError("times is a required keyword for the repeat iterator.")
    # Fixed: work on a copy — the original `del variables["times"]` mutated
    # the caller's mapping as a side effect.
    remaining = dict(variables)
    times = int(remaining.pop("times"))
    yield list(variable_matrix(remaining, parent, "product")) * times
| 728,009
|
It broadcasts the fluent shapes if any input is in batch mode.
It handles input shapes in different modes, expanding its
dimensions if necessary. It outputs a tuple with new shapes.
If no input shape is in batch mode, return (None, None).
If an input shape does not need to be changed, return None.
Args:
shape1: A fluent's shape.
shape2: A fluent's shape.
Returns:
A pair of new shapes.
|
def broadcast(cls,
              shape1: 'TensorFluentShape',
              shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]:
    """Broadcast the two fluent shapes when either input is in batch mode.

    Expands the lower-rank fluent's dimensions with leading 1s so the two
    shapes align, re-prepending the batch size when that shape is batched.
    If no input is in batch mode, or the fluent sizes already match,
    returns (None, None). A None entry means that shape needs no change.

    Args:
        shape1: A fluent's shape.
        shape2: A fluent's shape.

    Returns:
        A pair of new shapes (each possibly None).
    """
    reshape_1, reshape_2 = None, None
    # Broadcasting is only needed when at least one shape is batched.
    if not (shape1._batch or shape2._batch):
        return reshape_1, reshape_2
    size_1, size_2 = shape1.fluent_size, shape2.fluent_size
    size_diff = abs(size_1 - size_2)
    if size_diff == 0:
        return reshape_1, reshape_2
    # Pad the smaller fluent with leading singleton dims; skip padding a
    # scalar (size 0) non-batch fluent, which broadcasts as-is.
    if size_2 > size_1 and not (size_1 == 0 and not shape1._batch):
        reshape_1 = [1] * size_diff + list(shape1.fluent_shape)
        if shape1._batch:
            reshape_1 = [shape1.batch_size] + reshape_1
    elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch):
        reshape_2 = [1] * size_diff + list(shape2.fluent_shape)
        if shape2._batch:
            reshape_2 = [shape2.batch_size] + reshape_2
    return reshape_1, reshape_2
| 728,067
|
Returns a tuple of tensors representing the initial state fluents.
Args:
batch_size (Optional[int]): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors.
|
def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
    """Return a tuple of tensors for the initial state fluents.

    Args:
        batch_size (Optional[int]): The batch size; when None the
            unbatched fluents are returned.

    Returns:
        Sequence[tf.Tensor]: A tuple of tensors.
    """
    with self.graph.as_default(), tf.name_scope('initial_state'):
        self._initialize_initial_state_fluents()
        if batch_size is None:
            return self.initial_state_fluents
        return self._compile_batch_fluents(self.initial_state_fluents, batch_size)
| 728,117
|
Returns a tuple of tensors representing the default action fluents.
Args:
batch_size (int): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors.
|
def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
    """Return a tuple of tensors for the default action fluents.

    Args:
        batch_size (Optional[int]): The batch size; when None the
            unbatched fluents are returned.

    Returns:
        Sequence[tf.Tensor]: A tuple of tensors.
    """
    with self.graph.as_default(), tf.name_scope('default_action'):
        self._initialize_default_action_fluents()
        if batch_size is None:
            return self.default_action_fluents
        return self._compile_batch_fluents(self.default_action_fluents, batch_size)
| 728,118
|
Compiles the intermediate and next state fluent CPFs given
the current `state` and `action`.
Args:
state (Sequence[tf.Tensor]): A tuple of state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
Returns:
Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs.
|
def cpfs(self,
         state: Sequence[tf.Tensor],
         action: Sequence[tf.Tensor],
         noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]:
    """Compile the intermediate and next-state CPFs for `state` and `action`.

    The batch size is taken from the leading dimension of the first state
    tensor.

    Returns:
        A pair of lists of TensorFluent: (intermediate, next state).
    """
    scope = self.transition_scope(state, action)
    batch_size = int(state[0].shape[0])
    interm_pairs, next_state_pairs = self.compile_cpfs(scope, batch_size, noise)
    # Drop the names; callers only want the fluents in order.
    interms = [fluent for (_, fluent) in interm_pairs]
    next_state = [fluent for (_, fluent) in next_state_pairs]
    return interms, next_state
| 728,119
|
Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function.
|
def reward(self,
           state: Sequence[tf.Tensor],
           action: Sequence[tf.Tensor],
           next_state: Sequence[tf.Tensor]) -> tf.Tensor:
    """Compile the reward for `state`, `action` and `next_state`.

    Returns:
        (:obj:`tf.Tensor`): The reward tensor with a trailing size-1 axis.
    """
    scope = self.reward_scope(state, action, next_state)
    reward_fluent = self.compile_reward(scope)
    with self.graph.as_default(), tf.name_scope('reward'):
        return tf.expand_dims(reward_fluent.tensor, -1)
| 728,120
|
Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs.
|
def compile_cpfs(self,
                 scope: Dict[str, TensorFluent],
                 batch_size: Optional[int] = None,
                 noise: Optional[Noise] = None) -> Tuple[List[CPFPair], List[CPFPair]]:
    """Compile intermediate then next-state CPFs under the given scope.

    The intermediate fluents are added to `scope` (in place) before the
    state CPFs are compiled, since state CPFs may reference them.

    Returns:
        Tuple[List[CPFPair], List[CPFPair]]: (intermediate, next state) pairs.
    """
    interms = self.compile_intermediate_cpfs(scope, batch_size, noise)
    scope.update(dict(interms))
    next_states = self.compile_state_cpfs(scope, batch_size, noise)
    return interms, next_states
| 728,121
|
Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
|
def compile_intermediate_cpfs(self,
                              scope: Dict[str, TensorFluent],
                              batch_size: Optional[int] = None,
                              noise: Optional[Noise] = None) -> List[CPFPair]:
    """Compile the intermediate fluent CPFs under the given scope.

    Each compiled fluent is also inserted into `scope` (in place), so later
    intermediate CPFs may reference earlier ones.

    Returns:
        A list of (name, TensorFluent) pairs in domain declaration order.
    """
    fluents = []
    with self.graph.as_default(), tf.name_scope('intermediate_cpfs'):
        for cpf in self.rddl.domain.intermediate_cpfs:
            cpf_noise = None if noise is None else noise.get(cpf.name, None)
            with tf.name_scope(utils.identifier(cpf.name)):
                fluent = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
            fluents.append((cpf.name, fluent))
            scope[cpf.name] = fluent
    return fluents
| 728,122
|
Compiles the next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
|
def compile_state_cpfs(self,
                       scope: Dict[str, TensorFluent],
                       batch_size: Optional[int] = None,
                       noise: Optional[Noise] = None) -> List[CPFPair]:
    """Compile the next-state fluent CPFs under the given scope.

    Returns:
        A list of (name, TensorFluent) pairs ordered by the domain's
        next-state fluent ordering.
    """
    fluents = []
    with self.graph.as_default(), tf.name_scope('state_cpfs'):
        for cpf in self.rddl.domain.state_cpfs:
            cpf_noise = None if noise is None else noise.get(cpf.name, None)
            with tf.name_scope(utils.identifier(cpf.name)):
                fluent = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
            fluents.append((cpf.name, fluent))
        # Order results canonically by the domain's declared ordering.
        ordering = self.rddl.domain.next_state_fluent_ordering
        fluents = sorted(fluents, key=lambda pair: ordering.index(pair[0]))
    return fluents
| 728,123
|
Compiles the reward function given the fluent `scope`.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.
Returns:
A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.
|
def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent:
    """Compile the domain's reward expression under the given fluent scope.

    Returns:
        A :obj:`rddl2tf.fluent.TensorFluent` for the reward function.
    """
    with self.graph.as_default(), tf.name_scope('reward'):
        return self._compile_expression(self.rddl.domain.reward, scope)
| 728,124
|
Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
|
def compile_state_action_constraints(self,
                                     state: Sequence[tf.Tensor],
                                     action: Sequence[tf.Tensor]) -> List[TensorFluent]:
    """Compile the state-action constraints for `state` and `action`.

    Returns:
        A list of :obj:`rddl2tf.fluent.TensorFluent`, one per constraint.
    """
    scope = self.transition_scope(state, action)
    with self.graph.as_default(), tf.name_scope('state_action_constraints'):
        return [self._compile_expression(p, scope)
                for p in self.rddl.domain.constraints]
| 728,125
|
Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
|
def compile_action_preconditions(self,
                                 state: Sequence[tf.Tensor],
                                 action: Sequence[tf.Tensor]) -> List[TensorFluent]:
    """Compile the action preconditions for `state` and `action`.

    Returns:
        A list of :obj:`rddl2tf.fluent.TensorFluent`, one per precondition.
    """
    scope = self.action_precondition_scope(state, action)
    with self.graph.as_default(), tf.name_scope('action_preconditions'):
        return [self._compile_expression(p, scope)
                for p in self.rddl.domain.preconds]
| 728,126
|
Compiles the state invariants given current `state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
|
def compile_state_invariants(self,
                             state: Sequence[tf.Tensor]) -> List[TensorFluent]:
    """Compile the state invariants for the current `state`.

    Returns:
        A list of :obj:`rddl2tf.fluent.TensorFluent`, one per invariant.
    """
    scope = self.state_invariant_scope(state)
    with self.graph.as_default(), tf.name_scope('state_invariants'):
        return [self._compile_expression(p, scope)
                for p in self.rddl.domain.invariants]
| 728,127
|
Combines the action preconditions into an applicability checking op.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A boolean tensor for checking if `action` is applicable in `state`.
|
def compile_action_preconditions_checking(self,
                                          state: Sequence[tf.Tensor],
                                          action: Sequence[tf.Tensor]) -> tf.Tensor:
    """Combine all action preconditions into one applicability check.

    Returns:
        A boolean tensor that is True where every precondition holds.
    """
    with self.graph.as_default(), tf.name_scope('action_preconditions_checking'):
        preconds = self.compile_action_preconditions(state, action)
        stacked = tf.stack([p.tensor for p in preconds], axis=1)
        return tf.reduce_all(stacked, axis=1)
| 728,128
|
Compiles all actions bounds for the given `state`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from action names to a pair of
:obj:`rddl2tf.fluent.TensorFluent` representing
its lower and upper bounds.
|
def compile_action_bound_constraints(self,
                                     state: Sequence[tf.Tensor]) -> Dict[str, Bounds]:
    """Compile all action bounds for the given `state`.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.

    Returns:
        A mapping from action names to a (lower, upper) pair of
        :obj:`rddl2tf.fluent.TensorFluent` bounds; either side is None
        when the domain declares no such bound.
    """
    scope = self.action_precondition_scope(state)
    lower_bounds = self.rddl.domain.action_lower_bound_constraints
    upper_bounds = self.rddl.domain.action_upper_bound_constraints

    def compile_bound(expr, scope_name):
        # One bound side: compile under its own name scope, None if absent.
        # (Extracted to remove the duplicated lower/upper logic.)
        if expr is None:
            return None
        with tf.name_scope(scope_name):
            return self._compile_expression(expr, scope)

    with self.graph.as_default():
        with tf.name_scope('action_bound_constraints'):
            bounds = {}
            for name in self.rddl.domain.action_fluent_ordering:
                lower = compile_bound(lower_bounds.get(name), 'lower_bound')
                upper = compile_bound(upper_bounds.get(name), 'upper_bound')
                bounds[name] = (lower, upper)
            return bounds
| 728,129
|
Returns a partial scope with current state-fluents.
Args:
state_fluents (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
def state_scope(self, state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    """Map each state-fluent name to its fluent, in declaration order."""
    names = self.rddl.domain.state_fluent_ordering
    return {name: fluent for name, fluent in zip(names, state_fluents)}
| 728,131
|
Returns a partial scope with current action-fluents.
Args:
action_fluents (Sequence[tf.Tensor]): The action fluents.
Returns:
A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    """Map each action-fluent name to its fluent, in declaration order."""
    names = self.rddl.domain.action_fluent_ordering
    return {name: fluent for name, fluent in zip(names, action_fluents)}
| 728,132
|
Returns a partial scope with current next state-fluents.
Args:
next_state_fluents (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    """Map each next-state-fluent name to its fluent, in declaration order."""
    names = self.rddl.domain.next_state_fluent_ordering
    return {name: fluent for name, fluent in zip(names, next_state_fluents)}
| 728,133
|
Returns the complete transition fluent scope
for the current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
def transition_scope(self,
                     state: Sequence[tf.Tensor],
                     action: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    """Build the complete transition scope: non-fluents, state and action.

    Returns:
        A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
    """
    return {
        **self.non_fluents_scope(),
        **self.state_scope(state),
        **self.action_scope(action),
    }
| 728,134
|
Returns the complete reward fluent scope for the
current `state`, `action` fluents, and `next_state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
next_state (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
def reward_scope(self,
                 state: Sequence[tf.Tensor],
                 action: Sequence[tf.Tensor],
                 next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    """Build the complete reward scope: non-fluents, state, action, next state.

    Returns:
        A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
    """
    return {
        **self.non_fluents_scope(),
        **self.state_scope(state),
        **self.action_scope(action),
        **self.next_state_scope(next_state),
    }
| 728,135
|
Returns the state invariant fluent scope for the current `state`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
def state_invariant_scope(self, state: Sequence[tf.Tensor]):
    """Build the state-invariant scope: non-fluents plus current state."""
    return {
        **self.non_fluents_scope(),
        **self.state_scope(state),
    }
| 728,136
|
Compile the expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent.
|
def _compile_expression(self,
                        expr: Expression,
                        scope: Dict[str, TensorFluent],
                        batch_size: Optional[int] = None,
                        noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile `expr` into a TensorFluent in the given `scope`.

    Dispatches on the expression's etype tag to the matching specialized
    compiler method.

    Raises:
        ValueError: If the expression type is unknown.
    """
    dispatch = {
        'constant': self._compile_constant_expression,
        'pvar': self._compile_pvariable_expression,
        'randomvar': self._compile_random_variable_expression,
        'arithmetic': self._compile_arithmetic_expression,
        'boolean': self._compile_boolean_expression,
        'relational': self._compile_relational_expression,
        'func': self._compile_function_expression,
        'control': self._compile_control_flow_expression,
        'aggregation': self._compile_aggregation_expression
    }
    compiler_fn = dispatch.get(expr.etype[0])
    if compiler_fn is None:
        raise ValueError('Expression type unknown: {}'.format(expr.etype))
    with self.graph.as_default():
        return compiler_fn(expr, scope, batch_size, noise)
| 728,142
|
Compile a constant expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
def _compile_constant_expression(self,
                                 expr: Expression,
                                 scope: Dict[str, TensorFluent],
                                 batch_size: Optional[int] = None,
                                 noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile a constant expression into a constant TensorFluent.

    `scope`, `batch_size` and `noise` are unused; constants need no context.
    """
    dtype = utils.python_type_to_dtype(expr.etype[1])
    return TensorFluent.constant(expr.args, dtype=dtype)
| 728,143
|
Compile a pvariable expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
def _compile_pvariable_expression(self,
                                  expr: Expression,
                                  scope: Dict[str, TensorFluent],
                                  batch_size: Optional[int] = None,
                                  noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile a pvariable expression by looking it up in `scope`.

    The resulting fluent keeps the looked-up tensor but takes its variable
    scope from the expression's arguments.

    Raises:
        ValueError: If the variable is not in scope, or the scoped value is
            neither a TensorFluent nor a tf.Tensor.
    """
    args = expr.args
    name = expr._pvar_to_name(args)
    if name not in scope:
        raise ValueError('Variable {} not in scope.'.format(name))
    fluent = scope[name]
    fluent_scope = args[1] if args[1] is not None else []
    if isinstance(fluent, TensorFluent):
        return TensorFluent(fluent.tensor, fluent_scope, batch=fluent.batch)
    if isinstance(fluent, tf.Tensor):
        return TensorFluent(fluent, fluent_scope, batch=self.batch_mode)
    raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent))
| 728,144
|
Compile an arithmetic expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
def _compile_arithmetic_expression(self,
                                   expr: Expression,
                                   scope: Dict[str, TensorFluent],
                                   batch_size: Optional[int] = None,
                                   noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile a RDDL arithmetic expression into a TensorFluent.

    Handles unary ('+', '-') and binary ('+', '-', '*', '/') operators,
    compiling operand subexpressions first.

    Args:
        expr: The RDDL arithmetic expression.
        scope: Mapping from variable names to fluents.
        batch_size: Optional batch size, threaded to subexpressions.
        noise: Optional reparameterization noise, threaded to subexpressions.

    Returns:
        TensorFluent: The compiled arithmetic result.

    Raises:
        ValueError: If the operator is not a valid unary/binary operator.
    """
    etype = expr.etype
    args = expr.args
    if len(args) == 1:
        unary_ops = {
            '+': lambda x: x,
            '-': lambda x: -x
        }
        if etype[1] not in unary_ops:
            # Fix: this branch handles *unary* operators; the message
            # previously said "binary".
            raise ValueError('Invalid unary arithmetic expression:\n{}'.format(expr))
        op = unary_ops[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        fluent = op(x)
    else:
        binary_ops = {
            '+': lambda x, y: x + y,
            '-': lambda x, y: x - y,
            '*': lambda x, y: x * y,
            '/': lambda x, y: x / y,
        }
        if etype[1] not in binary_ops:
            raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr))
        op = binary_ops[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        y = self._compile_expression(args[1], scope, batch_size, noise)
        fluent = op(x, y)
    return fluent
| 728,146
|
Compile a function expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[int]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
def _compile_function_expression(self,
                                 expr: Expression,
                                 scope: Dict[str, TensorFluent],
                                 batch_size: Optional[int] = None,
                                 noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile a RDDL function expression into a TensorFluent.

    Dispatches on the function name to the matching TensorFluent
    operation, compiling operand subexpressions first.

    Args:
        expr: The RDDL function expression.
        scope: Mapping from variable names to fluents.
        batch_size: Optional batch size, threaded to subexpressions.
        noise: Optional reparameterization noise, threaded to subexpressions.

    Returns:
        TensorFluent: The compiled function result.

    Raises:
        ValueError: If the function name is not a known unary/binary function.
    """
    fn_name = expr.etype[1]
    operands = expr.args
    if len(operands) == 1:
        unary_fns = {
            'abs': TensorFluent.abs,
            'exp': TensorFluent.exp,
            'log': TensorFluent.log,
            'sqrt': TensorFluent.sqrt,
            'cos': TensorFluent.cos,
            'sin': TensorFluent.sin,
            'tan': TensorFluent.tan,
            'acos': TensorFluent.acos,
            'arccos': TensorFluent.acos,
            'asin': TensorFluent.asin,
            'arcsin': TensorFluent.asin,
            'atan': TensorFluent.atan,
            'arctan': TensorFluent.atan,
            'round': TensorFluent.round,
            'ceil': TensorFluent.ceil,
            'floor': TensorFluent.floor
        }
        fn = unary_fns.get(fn_name)
        if fn is None:
            raise ValueError('Invalid unary function expression:\n{}'.format(expr))
        operand = self._compile_expression(operands[0], scope, batch_size, noise)
        return fn(operand)
    binary_fns = {
        'pow': TensorFluent.pow,
        'max': TensorFluent.max,
        'min': TensorFluent.min
    }
    fn = binary_fns.get(fn_name)
    if fn is None:
        raise ValueError('Invalid binary function expression:\n{}'.format(expr))
    lhs = self._compile_expression(operands[0], scope, batch_size, noise)
    rhs = self._compile_expression(operands[1], scope, batch_size, noise)
    return fn(lhs, rhs)
| 728,147
|
Compile a control flow expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[int]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
def _compile_control_flow_expression(self,
                                     expr: Expression,
                                     scope: Dict[str, TensorFluent],
                                     batch_size: Optional[int] = None,
                                     noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile a RDDL control flow (if-then-else) expression into a TensorFluent.

    Args:
        expr: The RDDL control flow expression.
        scope: Mapping from variable names to fluents.
        batch_size: Optional batch size, threaded to subexpressions.
        noise: Optional reparameterization noise, threaded to subexpressions.

    Returns:
        TensorFluent: The compiled if-then-else result.

    Raises:
        ValueError: If the expression is not an 'if' expression.
    """
    # Guard clause: 'if' is the only supported control flow construct.
    if expr.etype[1] != 'if':
        raise ValueError('Invalid control flow expression:\n{}'.format(expr))
    branches = expr.args
    condition = self._compile_expression(branches[0], scope, batch_size, noise)
    true_case = self._compile_expression(branches[1], scope, batch_size, noise)
    false_case = self._compile_expression(branches[2], scope, batch_size, noise)
    return TensorFluent.if_then_else(condition, true_case, false_case)
| 728,148
|
Compile an aggregation expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[int]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
def _compile_aggregation_expression(self,
                                    expr: Expression,
                                    scope: Dict[str, TensorFluent],
                                    batch_size: Optional[int] = None,
                                    noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    """Compile a RDDL aggregation expression into a TensorFluent.

    Compiles the aggregated subexpression and reduces it over the
    aggregation's bound variables.

    Args:
        expr: The RDDL aggregation expression.
        scope: Mapping from variable names to fluents.
        batch_size: Optional batch size, threaded to the subexpression.
        noise: Optional reparameterization noise, threaded to the subexpression.

    Returns:
        TensorFluent: The compiled aggregation result.

    Raises:
        ValueError: If the aggregation operator is unknown.
    """
    etype = expr.etype
    args = expr.args
    typed_var_list = args[:-1]
    vars_list = [var for _, (var, _) in typed_var_list]
    expr = args[-1]
    # Fix: thread batch_size and noise through to the aggregated
    # subexpression, consistent with the other _compile_* methods
    # (previously they were silently dropped here).
    x = self._compile_expression(expr, scope, batch_size, noise)
    etype2aggr = {
        'sum': x.sum,
        'prod': x.prod,
        'avg': x.avg,
        'maximum': x.maximum,
        'minimum': x.minimum,
        'exists': x.exists,
        'forall': x.forall
    }
    if etype[1] not in etype2aggr:
        raise ValueError('Invalid aggregation expression {}.'.format(expr))
    aggr = etype2aggr[etype[1]]
    return aggr(vars_list=vars_list)
| 728,149
|
Instantiates Event instance.
Args:
json(dict): Event json from homeserver.
Api(func): Creates api for calling homeserver.
|
def __init__(self, json, Api):
    """Wrap a single event dict received from the homeserver.

    Args:
        json(dict): Event json from homeserver.
        Api(func): Creates api for calling homeserver.
    """
    self.json = json
    self.Api = Api
    self.type = json["type"]
    self.content = json["content"]
    self.timestamp = json["origin_server_ts"]
    self.id = json["room_id"]
    # Older event payloads carry "user_id" instead of "sender".
    self.mxid = json["sender"] if "sender" in json else json["user_id"]
| 728,163
|
Instantiates EventStream instance.
Args:
json(list): List from deserializing txn from homeserver.
Api(func): Generates http api when passed identity=mxid.
|
def __init__(self, json, Api):
    """Wrap a deserialized transaction (list of events) from the homeserver.

    Args:
        json(list): List from deserializing txn from homeserver.
        Api(func): Generates http api when passed identity=mxid.
    """
    self.json = json
    self.Api = Api
    # Cursor for iteration over the event list.
    self._index = 0
| 728,166
|
Instantiates MatrixRoom object.
Args:
room_id(str): Matrix room id (e.g. !1234567:example.com)
api(MatrixASHttpAPI): Api for calls to the server.
|
def __init__(self, room_id, api):
    """Bind a Matrix room id to an API client.

    Args:
        room_id(str): Matrix room id (e.g. !1234567:example.com)
        api(MatrixASHttpAPI): Api for calls to the server.
    """
    self.api = api
    self.room_id = room_id
| 728,168
|
Instantiates MatrixUser object.
Args:
mxid(str): User id (e.g. @me:example.com)
Api(func): Generates api for calls to the server.
|
def __init__(self, mxid, Api):
    """Bind a Matrix user id to per-user and default API clients.

    Args:
        mxid(str): User id (e.g. @me:example.com)
        Api(func): Generates api for calls to the server.
    """
    self.mxid = mxid
    # One api acting as this user, one acting as the service itself.
    self.user_api = Api(identity=mxid)
    self.api = Api()
    self._rooms = {}
| 728,173
|
Lists the container groups in the specified resource group.
Arguments:
aci_client {azure.mgmt.containerinstance.ContainerInstanceManagementClient}
-- An authenticated container instance management client.
resource_group {azure.mgmt.resource.resources.models.ResourceGroup}
-- The resource group containing the container group(s).
|
def list_container_groups(self, resource_group_name):
    """Print the names of all container groups in a resource group.

    Args:
        resource_group_name(str): Name of the resource group to inspect.
    """
    print("Listing container groups in resource group '{0}'...".format(resource_group_name))
    groups = self.client.container_groups.list_by_resource_group(resource_group_name)
    for group in groups:
        print("  {0}".format(group.name))
| 728,886
|
List the blobs/files inside a container/share_name.
Args:
container_or_share_name(str): Name of the container/share_name where we want to list the blobs/files.
container(bool): flag to know it you are listing files or blobs.
account(str): The name of the storage account.
|
def list(self, container_or_share_name, container=None, account=None):
    """List the blobs/files inside a container/share_name.

    Args:
        container_or_share_name(str): Name of the container/share_name
            where we want to list the blobs/files.
        container(bool): if truthy, list blobs in a container; otherwise
            list files in a file share.
        account(str): The name of the storage account.

    Returns:
        list: names of the blobs or files found.
    """
    # Uses the first access key of the storage account.
    key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, account).keys[0].value
    if container:
        bs = BlockBlobService(account_name=account, account_key=key)
        container_list = []
        for i in bs.list_blobs(container_or_share_name).items:
            container_list.append(i.name)
        return container_list
    elif not container:
        # NOTE(review): this branch also matches container=None (the
        # default), so the ValueError below is unreachable -- confirm
        # whether a missing `container` was meant to raise instead.
        fs = FileService(account_name=account, account_key=key)
        container_list = []
        for i in fs.list_directories_and_files(container_or_share_name).items:
            container_list.append(i.name)
        return container_list
    else:
        raise ValueError("You have to pass a value for container param")
| 728,966
|
Sign a remote file to distribute. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob.
Args:
remote_file(str): The blob that we want to sign.
|
def generate_url(self, remote_file):
    """Sign a remote file to distribute.

    The azure url format is
    https://myaccount.blob.core.windows.net/mycontainer/myblob.

    Args:
        remote_file(str): The blob/file url that we want to sign.

    Returns:
        str: a SAS-signed URL valid for 24 hours.

    Raises:
        ValueError: if the parsed storage type is neither blob nor file.
    """
    parse_url = _parse_url(remote_file)
    # Uses the first access key of the storage account for signing.
    key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value
    if parse_url.file_type == 'blob':
        bs = BlockBlobService(account_name=parse_url.account, account_key=key)
        sas_token = bs.generate_blob_shared_access_signature(parse_url.container_or_share_name,
                                                             parse_url.file,
                                                             permission=BlobPermissions.READ,
                                                             expiry=datetime.utcnow() + timedelta(hours=24),
                                                             )
        source_blob_url = bs.make_blob_url(container_name=parse_url.container_or_share_name,
                                           blob_name=parse_url.file,
                                           sas_token=sas_token)
        return source_blob_url
    elif parse_url.file_type == 'file':
        fs = FileService(account_name=parse_url.account, account_key=key)
        # NOTE(review): BlobPermissions is passed for a *file* share here;
        # FilePermissions.READ looks intended -- confirm against the SDK.
        sas_token = fs.generate_file_shared_access_signature(share_name=parse_url.container_or_share_name,
                                                             directory_name=parse_url.path,
                                                             file_name=parse_url.file,
                                                             permission=BlobPermissions.READ,
                                                             expiry=datetime.utcnow() + timedelta(hours=24),
                                                             )
        source_file_url = fs.make_file_url(share_name=parse_url.container_or_share_name,
                                           directory_name=parse_url.path,
                                           file_name=parse_url.file,
                                           sas_token=sas_token)
        return source_file_url
    else:
        raise ValueError("This azure storage type is not valid. It should be blob or file.")
| 728,967
|
Delete file from the cloud. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob.
Args:
remote_file(str): The path of the file to be deleted.
Raises:
:exc:`~..OsmosisError`: if the file is not uploaded correctly.
|
def delete(self, remote_file):
    """Delete a blob or file from Azure storage.

    The azure url format is
    https://myaccount.blob.core.windows.net/mycontainer/myblob.

    Args:
        remote_file(str): The path of the file to be deleted.

    Raises:
        OsmosisError: if the url is not an Azure storage url.
        ValueError: if the parsed storage type is neither blob nor file.
    """
    if 'core.windows.net' not in remote_file:
        self.logger.error("Source or destination must be a azure storage url (format "
                          "https://myaccount.blob.core.windows.net/mycontainer/myblob")
        raise OsmosisError
    url = _parse_url(remote_file)
    # Uses the first access key of the storage account.
    account_key = self.storage_client.storage_accounts.list_keys(
        self.resource_group_name, url.account).keys[0].value
    if url.file_type == 'blob':
        service = BlockBlobService(account_name=url.account, account_key=account_key)
        return service.delete_blob(url.container_or_share_name, url.file)
    if url.file_type == 'file':
        service = FileService(account_name=url.account, account_key=account_key)
        return service.delete_file(url.container_or_share_name, url.path, url.file)
    raise ValueError("This azure storage type is not valid. It should be blob or file.")
| 728,968
|
Copy file from a path to another path. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob.
Args:
source_path(str): The path of the file to be copied.
dest_path(str): The destination path where the file is going to be allocated.
Raises:
:exc:`~..OsmosisError`: if the file is not uploaded correctly.
|
def copy(self, source_path, dest_path, account=None, group_name=None):
    """Copy file from a path to another path.

    Exactly one of the two paths must be an Azure storage url of the form
    https://myaccount.blob.core.windows.net/mycontainer/myblob: an azure
    source downloads to a local dest, a local source uploads to an azure
    dest.

    Args:
        source_path(str): The path of the file to be copied.
        dest_path(str): The destination path where the file is going to
            be allocated.
        account(str): unused here -- TODO confirm it can be removed.
        group_name(str): unused here -- TODO confirm it can be removed.

    Raises:
        OsmosisError: if neither path is an Azure storage url.
        ValueError: if the parsed storage type is neither blob nor file.
    """
    if 'core.windows.net' not in source_path and 'core.windows.net' not in dest_path:
        self.logger.error("Source or destination must be a azure storage url (format "
                          "https://myaccount.blob.core.windows.net/mycontainer/myblob")
        raise OsmosisError
    # Check if source exists and can read
    if 'core.windows.net' in source_path:
        # Download: azure source -> local dest_path.
        parse_url = _parse_url(source_path)
        key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[
            0].value
        if parse_url.file_type == 'blob':
            bs = BlockBlobService(account_name=parse_url.account, account_key=key)
            return bs.get_blob_to_path(parse_url.container_or_share_name, parse_url.file, dest_path)
        elif parse_url.file_type == 'file':
            fs = FileService(account_name=parse_url.account, account_key=key)
            return fs.get_file_to_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, dest_path)
        else:
            raise ValueError("This azure storage type is not valid. It should be blob or file.")
    else:
        # Upload: local source_path -> azure dest.
        parse_url = _parse_url(dest_path)
        key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[
            0].value
        if parse_url.file_type == 'blob':
            bs = BlockBlobService(account_name=parse_url.account, account_key=key)
            return bs.create_blob_from_path(parse_url.container_or_share_name, parse_url.file, source_path)
        elif parse_url.file_type == 'file':
            fs = FileService(account_name=parse_url.account, account_key=key)
            return fs.create_file_from_path(parse_url.container_or_share_name, parse_url.path, parse_url.file,
                                            source_path)
        else:
            raise ValueError("This azure storage type is not valid. It should be blob or file.")
| 728,969
|
Sends details about a Python to the log, specifically its ``repr()``
representation, and all of its attributes with their name, value, and type.
Args:
obj: object to debug
log_level: log level to use; default is ``logging.DEBUG``
|
def debug_object(obj, log_level: int = logging.DEBUG) -> None:
    """Log an object's ``repr()`` plus every attribute's name, value, and type.

    Args:
        obj: object to debug
        log_level: log level to use; default is ``logging.DEBUG``
    """
    lines = ["For {o!r}:".format(o=obj)]
    for name in dir(obj):
        value = getattr(obj, name)
        lines.append("- {an!r}: {at!r}, of type {t!r}".format(
            an=name, at=value, t=type(value)))
    log.log(log_level, "{}", "\n".join(lines))
| 729,627
|
Does the interval contain a momentary time?
Args:
time: the ``datetime.datetime`` to check
inclusive: use inclusive rather than exclusive range checks?
|
def contains(self, time: datetime.datetime,
             inclusive: bool = True) -> bool:
    """Does the interval contain the momentary ``time``?

    Args:
        time: the ``datetime.datetime`` to check
        inclusive: use inclusive rather than exclusive range checks?
    """
    if inclusive:
        return self.start <= time <= self.end
    return self.start < time < self.end
| 729,680
|
Is this interval contained within the other?
Args:
other: the :class:`Interval` to check
inclusive: use inclusive rather than exclusive range checks?
|
def within(self, other: "Interval", inclusive: bool = True) -> bool:
    """Is this interval contained within ``other``?

    Args:
        other: the :class:`Interval` to check
        inclusive: use inclusive rather than exclusive range checks?
    """
    if not other:
        return False
    if inclusive:
        return other.start <= self.start and self.end <= other.end
    return other.start < self.start and self.end < other.end
| 729,681
|
Creates the :class:`IntervalList`.
Args:
intervals: optional list of :class:`Interval` objects to
incorporate into the :class:`IntervalList`
no_overlap: merge intervals that overlap (now and on subsequent
addition)?
no_contiguous: if ``no_overlap`` is set, merge intervals that are
contiguous too?
|
def __init__(self,
             intervals: List[Interval] = None,
             no_overlap: bool = True,
             no_contiguous: bool = True) -> None:
    """Create the :class:`IntervalList`.

    Args:
        intervals: optional list of :class:`Interval` objects to
            incorporate into the :class:`IntervalList`
        no_overlap: merge intervals that overlap (now and on subsequent
            addition)?
        no_contiguous: if ``no_overlap`` is set, merge intervals that
            are contiguous too?

    Raises:
        TypeError: if any element is not an :class:`Interval`.
    """
    # Copy the caller's list: a shared (or mutable-default) list would
    # make separate IntervalList objects non-independent.
    self.intervals = [] if intervals is None else list(intervals)
    self.no_overlap = no_overlap
    self.no_contiguous = no_contiguous
    for element in self.intervals:
        if isinstance(element, Interval):
            continue
        raise TypeError(
            "IntervalList creation failed: contents are not all "
            "Interval: {}".format(repr(self.intervals)))
    self._tidy()
| 729,692
|
Makes and returns a copy of the :class:`IntervalList`. The
``no_overlap``/``no_contiguous`` parameters can be changed.
Args:
no_overlap: merge intervals that overlap (now and on subsequent
addition)?
no_contiguous: if ``no_overlap`` is set, merge intervals that are
contiguous too?
|
def copy(self, no_overlap: bool = None,
         no_contiguous: bool = None) -> "IntervalList":
    """Return a copy of this :class:`IntervalList`.

    The ``no_overlap``/``no_contiguous`` settings can be overridden;
    ``None`` keeps the current value.

    Args:
        no_overlap: merge intervals that overlap (now and on subsequent
            addition)?
        no_contiguous: if ``no_overlap`` is set, merge intervals that
            are contiguous too?
    """
    merge_overlap = self.no_overlap if no_overlap is None else no_overlap
    merge_contiguous = (self.no_contiguous if no_contiguous is None
                        else no_contiguous)
    return IntervalList(self.intervals, no_overlap=merge_overlap,
                        no_contiguous=merge_contiguous)
| 729,694
|
Called by :meth:`remove_overlap`. Removes the first overlap found.
Args:
also_remove_contiguous: treat contiguous (as well as overlapping)
intervals as worthy of merging?
Returns:
bool: ``True`` if an overlap was removed; ``False`` otherwise
|
def _remove_overlap_sub(self, also_remove_contiguous: bool) -> bool:
    """Merge the first overlapping (or contiguous) pair found.

    Called by :meth:`remove_overlap`.

    Args:
        also_remove_contiguous: treat contiguous (as well as
            overlapping) intervals as worthy of merging?

    Returns:
        bool: ``True`` if a pair was merged; ``False`` otherwise
    """
    count = len(self.intervals)
    for i in range(count):
        for j in range(i + 1, count):
            first = self.intervals[i]
            second = self.intervals[j]
            if also_remove_contiguous:
                mergeable = first.contiguous(second)
            else:
                mergeable = first.overlaps(second)
            if mergeable:
                merged = first.union(second)
                # Pop the higher index first so the lower one stays valid.
                self.intervals.pop(j)
                self.intervals.pop(i)
                self.intervals.append(merged)
                return True
    return False
| 729,697
|
Merges any overlapping intervals.
Args:
also_remove_contiguous: treat contiguous (as well as overlapping)
intervals as worthy of merging?
|
def remove_overlap(self, also_remove_contiguous: bool = False) -> None:
    """Merge any overlapping intervals, then re-sort the list.

    Args:
        also_remove_contiguous: treat contiguous (as well as
            overlapping) intervals as worthy of merging?
    """
    # Each call merges at most one pair; repeat until nothing merges.
    while self._remove_overlap_sub(also_remove_contiguous):
        pass
    self._sort()
| 729,698
|
Do any of the intervals overlap (or, if ``test_overlap`` is ``False``, are any contiguous)?
Args:
test_overlap: if ``True``, test for overlapping intervals; if
``False``, test for contiguous intervals.
|
def _any_overlap_or_contiguous(self, test_overlap: bool) -> bool:
    """Check every pair of intervals for overlap or contiguity.

    Args:
        test_overlap: if ``True``, test for overlapping intervals; if
            ``False``, test for contiguous intervals.

    Returns:
        bool: ``True`` if any pair matches.
    """
    count = len(self.intervals)
    for i in range(count):
        for j in range(i + 1, count):
            first = self.intervals[i]
            second = self.intervals[j]
            matched = (first.overlaps(second) if test_overlap
                       else first.contiguous(second))
            if matched:
                return True
    return False
| 729,699
|
Fetches a Git repository, unless we have it already.
Args:
prettyname: name to display to user
url: URL
directory: destination directory
branch: repository branch
commit: repository commit tag
clone_options: additional options to pass to ``git clone``
run_func: function to use to call an external command
Returns:
did we need to do anything?
|
def git_clone(prettyname: str, url: str, directory: str,
              branch: str = None,
              commit: str = None,
              clone_options: List[str] = None,
              run_func: Callable[[List[str]], Any] = None) -> bool:
    """Fetch a Git repository, unless we have it already.

    Args:
        prettyname: name to display to user
        url: URL
        directory: destination directory
        branch: repository branch
        commit: repository commit tag
        clone_options: additional options to pass to ``git clone``
        run_func: function to use to call an external command

    Returns:
        bool: did we need to do anything?
    """
    run_func = run_func or subprocess.check_call
    clone_options = clone_options or []  # type: List[str]
    # An existing directory means the source was fetched previously.
    if os.path.isdir(directory):
        log.info("Not re-cloning {} Git repository: using existing source "
                 "in {}".format(prettyname, directory))
        return False
    log.info("Fetching {} source from {} into {}",
             prettyname, url, directory)
    require_executable(GIT)
    cmd = [GIT, "clone"] + clone_options
    if branch:
        cmd.extend(["--branch", branch])
    cmd.extend([url, directory])
    run_func(cmd)
    if commit:
        log.info("Resetting {} local Git repository to commit {}",
                 prettyname, commit)
        # "-C directory" runs git against a repo outside the cwd.
        run_func([GIT, "-C", directory, "reset", "--hard", commit])
    return True
| 729,772
|
Run a command and returns its stdout.
Args:
args: the command-line arguments
env: the operating system environment to use
encoding: the encoding to use for ``stdout``
Returns:
the command's ``stdout`` output
|
def fetch(args: List[str], env: Dict[str, str] = None,
          encoding: str = sys.getdefaultencoding()) -> str:
    """Run a command and return its stdout.

    Args:
        args: the command-line arguments
        env: the operating system environment to use
        encoding: the encoding to use for ``stdout``

    Returns:
        str: the command's ``stdout`` output
    """
    output, _ = run(args, env=env, capture_stdout=True,
                    echo_stdout=False, encoding=encoding)
    log.debug(output)
    return output
| 729,776
|
Dumps some connection info, as an SQL comment. Obscures passwords.
Args:
engine: the SQLAlchemy :class:`Engine` to dump metadata information
from
fileobj: the file-like object (default ``sys.stdout``) to write
information to
|
def dump_connection_info(engine: Engine, fileobj: TextIO = sys.stdout) -> None:
    """Write connection info, as an SQL comment. Obscures passwords.

    Args:
        engine: the SQLAlchemy :class:`Engine` to dump metadata
            information from
        fileobj: the file-like object (default ``sys.stdout``) to write
            information to
    """
    meta = MetaData(bind=engine)
    comment = sql_comment('Database info: {}'.format(meta))
    writeline_nl(fileobj, comment)
| 729,777
|
Sends schema-creating DDL from the metadata to the dump engine.
This makes ``CREATE TABLE`` statements.
Args:
metadata: SQLAlchemy :class:`MetaData`
dialect_name: string name of SQL dialect to generate DDL in
fileobj: file-like object to send DDL to
checkfirst: if ``True``, use ``CREATE TABLE IF NOT EXISTS`` or
equivalent.
|
def dump_ddl(metadata: MetaData,
             dialect_name: str,
             fileobj: TextIO = sys.stdout,
             checkfirst: bool = True) -> None:
    """
    Sends schema-creating DDL from the metadata to ``fileobj``, making
    ``CREATE TABLE`` statements for the given SQL dialect.

    Args:
        metadata: SQLAlchemy :class:`MetaData`
        dialect_name: string name of SQL dialect to generate DDL in
        fileobj: file-like object to send DDL to
        checkfirst: if ``True``, use ``CREATE TABLE IF NOT EXISTS`` or
            equivalent.
    """
    # http://docs.sqlalchemy.org/en/rel_0_8/faq.html#how-can-i-get-the-create-table-drop-table-output-as-a-string # noqa
    # http://stackoverflow.com/questions/870925/how-to-generate-a-file-with-ddl-in-the-engines-sql-dialect-in-sqlalchemy # noqa
    # https://github.com/plq/scripts/blob/master/pg_dump.py
    # noinspection PyUnusedLocal
    def dump(querysql, *multiparams, **params):
        # `engine` is resolved at call time (late binding), so this
        # closure works even though `engine` is assigned below.
        compsql = querysql.compile(dialect=engine.dialect)
        writeline_nl(fileobj, "{sql};".format(sql=compsql))
    writeline_nl(fileobj,
                 sql_comment("Schema (for dialect {}):".format(dialect_name)))
    # A "mock" engine does not connect anywhere; it hands every statement
    # to the `dump` executor above instead of executing it.
    engine = create_engine('{dialect}://'.format(dialect=dialect_name),
                           strategy='mock', executor=dump)
    metadata.create_all(engine, checkfirst=checkfirst)
| 729,778
|
Makes a new SQLAlchemy mapper for an existing table.
See
http://www.tylerlesmann.com/2009/apr/27/copying-databases-across-platforms-sqlalchemy/
Args:
table: SQLAlchemy :class:`Table` object
Returns:
a :class:`DeclarativeMeta` class
|
def quick_mapper(table: Table) -> Type[DeclarativeMeta]:
    """Make a new SQLAlchemy mapper class for an existing table.

    See
    http://www.tylerlesmann.com/2009/apr/27/copying-databases-across-platforms-sqlalchemy/

    Args:
        table: SQLAlchemy :class:`Table` object

    Returns:
        a :class:`DeclarativeMeta` class mapped to the table
    """
    # Each call needs its own declarative base, so mappers don't collide.
    # noinspection PyPep8Naming
    base = declarative_base()

    class GenericMapper(base):
        __table__ = table

    # noinspection PyTypeChecker
    return GenericMapper
| 729,779
|
Reads a table from the database, and writes SQL to replicate the table's
data to the output ``fileobj``.
Args:
engine: SQLAlchemy :class:`Engine`
table_name: name of the table
fileobj: file-like object to write to
wheredict: optional dictionary of ``{column_name: value}`` to use as
``WHERE`` filters
include_ddl: if ``True``, include the DDL to create the table as well
multirow: write multi-row ``INSERT`` statements
|
def dump_table_as_insert_sql(engine: Engine,
                             table_name: str,
                             fileobj: TextIO,
                             wheredict: Dict[str, Any] = None,
                             include_ddl: bool = False,
                             multirow: bool = False) -> None:
    """
    Reads a table from the database, and writes SQL to replicate the
    table's data to the output ``fileobj``.

    Args:
        engine: SQLAlchemy :class:`Engine`
        table_name: name of the table
        fileobj: file-like object to write to
        wheredict: optional dictionary of ``{column_name: value}`` to use
            as ``WHERE`` filters
        include_ddl: if ``True``, include the DDL to create the table as
            well
        multirow: write multi-row ``INSERT`` statements
    """
    # http://stackoverflow.com/questions/5631078/sqlalchemy-print-the-actual-query # noqa
    # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
    # http://www.tylerlesmann.com/2009/apr/27/copying-databases-across-platforms-sqlalchemy/ # noqa
    # https://github.com/plq/scripts/blob/master/pg_dump.py
    # Fix: log messages previously named a nonexistent
    # "dump_data_as_insert_sql" function.
    log.info("dump_table_as_insert_sql: table_name={}", table_name)
    writelines_nl(fileobj, [
        SEP1,
        sql_comment("Data for table: {}".format(table_name)),
        SEP2,
        sql_comment("Filters: {}".format(wheredict)),
    ])
    dialect = engine.dialect
    if not dialect.supports_multivalues_insert:
        multirow = False
    if multirow:
        # Parameter substitution for multirow INSERTs only fills in the
        # first row; disabled until fixed.
        log.warning("dump_table_as_insert_sql: multirow parameter "
                    "substitution not working yet")
        multirow = False
    meta = MetaData(bind=engine)
    log.debug("... retrieving schema")
    table = Table(table_name, meta, autoload=True)
    if include_ddl:
        log.debug("... producing DDL")
        dump_ddl(table.metadata, dialect_name=engine.dialect.name,
                 fileobj=fileobj)
    log.debug("... fetching records")
    query = select(table.columns)
    if wheredict:
        for k, v in wheredict.items():
            col = table.columns.get(k)
            query = query.where(col == v)
    cursor = engine.execute(query)
    if multirow:
        row_dict_list = [dict(r) for r in cursor]
        if row_dict_list:
            statement = table.insert().values(row_dict_list)
            insert_str = get_literal_query(statement, bind=engine)
            # NOT WORKING FOR MULTIROW INSERTS. ONLY SUBSTITUTES FIRST ROW.
            writeline_nl(fileobj, insert_str)
        else:
            writeline_nl(fileobj, sql_comment("No data!"))
    else:
        # One INSERT statement per row.
        found_one = False
        for r in cursor:
            found_one = True
            statement = table.insert(values=dict(r))
            insert_str = get_literal_query(statement, bind=engine)
            writeline_nl(fileobj, insert_str)
        if not found_one:
            writeline_nl(fileobj, sql_comment("No data!"))
    writeline_nl(fileobj, SEP2)
    log.debug("... done")
| 729,782
|
Reads an entire database and writes SQL to replicate it to the output
file-like object.
Args:
engine: SQLAlchemy :class:`Engine`
fileobj: file-like object to write to
include_ddl: if ``True``, include the DDL to create the table as well
multirow: write multi-row ``INSERT`` statements
|
def dump_database_as_insert_sql(engine: Engine,
                                fileobj: TextIO = sys.stdout,
                                include_ddl: bool = False,
                                multirow: bool = False) -> None:
    """Read an entire database and write SQL to replicate it to ``fileobj``.

    Args:
        engine: SQLAlchemy :class:`Engine`
        fileobj: file-like object to write to
        include_ddl: if ``True``, include the DDL to create the table as
            well
        multirow: write multi-row ``INSERT`` statements
    """
    for name in get_table_names(engine):
        dump_table_as_insert_sql(engine=engine,
                                 table_name=name,
                                 fileobj=fileobj,
                                 include_ddl=include_ddl,
                                 multirow=multirow)
| 729,783
|
Takes a SQLAlchemy ORM object, and writes ``INSERT`` SQL to replicate it
to the output file-like object.
Args:
engine: SQLAlchemy :class:`Engine`
obj: SQLAlchemy ORM object to write
fileobj: file-like object to write to
|
def dump_orm_object_as_insert_sql(engine: Engine,
                                  obj: object,
                                  fileobj: TextIO) -> None:
    """Write ``INSERT`` SQL replicating a single SQLAlchemy ORM object.

    Args:
        engine: SQLAlchemy :class:`Engine`
        obj: SQLAlchemy ORM object to write
        fileobj: file-like object to write to
    """
    # insp: an InstanceState
    # http://docs.sqlalchemy.org/en/latest/orm/internals.html#sqlalchemy.orm.state.InstanceState # noqa
    # insp.mapper: a Mapper
    # http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper # noqa
    insp = inspect(obj)
    # Reflect the table from the database rather than reusing
    # insp.mapper.mapped_table directly: reflection gives bog-standard
    # database datatypes rather than fancy Python-side types (lists,
    # Arrow, ...).
    meta = MetaData(bind=engine)
    table_name = insp.mapper.mapped_table.name
    table = Table(table_name, meta, autoload=True)
    query = select(table.columns)
    # Constrain on every primary-key column of the object.
    for orm_pkcol in insp.mapper.primary_key:
        core_pkcol = table.columns.get(orm_pkcol.name)
        pkval = getattr(obj, orm_pkcol.name)
        query = query.where(core_pkcol == pkval)
    cursor = engine.execute(query)
    row = cursor.fetchone()  # should only be one...
    statement = table.insert(values=dict(row))
    insert_str = get_literal_query(statement, bind=engine)
    writeline_nl(fileobj, insert_str)
| 729,784
|
Writes bulk ``INSERT`` preamble (start=True) or end (start=False).
For MySQL, this temporarily switches off autocommit behaviour and index/FK
checks, for speed, then re-enables them at the end and commits.
Args:
dialect_name: SQLAlchemy dialect name (see :class:`SqlaDialectName`)
fileobj: file-like object to write to
start: if ``True``, write preamble; if ``False``, write end
|
def bulk_insert_extras(dialect_name: str,
                       fileobj: TextIO,
                       start: bool) -> None:
    """Write bulk ``INSERT`` preamble (``start=True``) or end (``start=False``).

    For MySQL, this temporarily switches off autocommit behaviour and
    index/FK checks, for speed, then re-enables them at the end and
    commits. Other dialects get no output.

    Args:
        dialect_name: SQLAlchemy dialect name (see :class:`SqlaDialectName`)
        fileobj: file-like object to write to
        start: if ``True``, write preamble; if ``False``, write end
    """
    lines = []  # type: List[str]
    if dialect_name == SqlaDialectName.MYSQL:
        if start:
            lines = [
                "SET autocommit=0;",
                "SET unique_checks=0;",
                "SET foreign_key_checks=0;",
            ]
        else:
            # Restore in reverse order, then commit the batch.
            lines = [
                "SET foreign_key_checks=1;",
                "SET unique_checks=1;",
                "COMMIT;",
            ]
    writelines_nl(fileobj, lines)
| 729,785
|
Converts a list of lists into a flat list.
Args:
x: list of lists
Returns:
flat list
As per
http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
|
def flatten_list(x: List[Any]) -> List[Any]:
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``.

    Args:
        x: list of lists

    Returns:
        flat list
    """
    flat = []  # type: List[Any]
    for sublist in x:
        flat.extend(sublist)
    return flat
| 729,838
|
Returns a list of all the unique elements in the input list.
Args:
seq: input list
Returns:
list of unique elements
As per
http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order
|
def unique_list(seq: Iterable[Any]) -> List[Any]:
    """Return the unique elements of ``seq``, preserving first-seen order.

    Args:
        seq: input iterable

    Returns:
        list of unique elements
    """
    seen = set()
    result = []  # type: List[Any]
    for item in seq:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result
| 729,839
|
Yield successive ``n``-sized chunks from ``l``.
Args:
l: input list
n: chunk size
Yields:
successive chunks of size ``n``
|
def chunks(l: List[Any], n: int) -> Iterable[List[Any]]:
    """Yield successive ``n``-sized chunks from ``l``.

    Args:
        l: input list
        n: chunk size

    Yields:
        successive chunks of size ``n`` (the last may be shorter)
    """
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
| 729,840
|
Used to create :data:`UNICODE_CATEGORY_STRINGS`.
Args:
srclist: list of integers or hex range strings like ``"0061-007A"``
Returns:
a string with all characters described by ``srclist``: either the
character corresponding to the integer Unicode character number, or
all characters corresponding to the inclusive range described
|
def _unicode_def_src_to_str(srclist: List[Union[str, int]]) -> str:
    """Build a string from Unicode code points and hex ranges.

    Used to create ``UNICODE_CATEGORY_STRINGS``.

    Args:
        srclist: list of integers or hex range strings like ``"0061-007A"``

    Returns:
        a string containing every character described by ``srclist``:
        either the character for each integer code point, or all
        characters in each inclusive hex range
    """
    pieces = []  # type: List[str]
    for entry in srclist:
        if isinstance(entry, int):
            pieces.append(chr(entry))
        else:
            # Hex range such as "0041-005A", inclusive at both ends.
            lo, hi = (int(part, 16) for part in entry.split("-"))
            pieces.extend(chr(cp) for cp in range(lo, hi + 1))
    return "".join(pieces)
| 729,845
|
As per
https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex.
Multi-platform variant of ``shlex.split()`` for command-line splitting.
For use with ``subprocess``, for ``argv`` injection etc. Using fast REGEX.
Args:
s:
string to split
platform:
- ``'this'`` = auto from current platform;
- ``1`` = POSIX;
- ``0`` = Windows/CMD
- (other values reserved)
|
def cmdline_split(s: str, platform: Union[int, str] = 'this') -> List[str]:
    """Multi-platform variant of ``shlex.split()`` for command-line splitting.

    For use with ``subprocess``, for ``argv`` injection etc. Uses a fast
    regex lexer. As per
    https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex.

    Args:
        s:
            string to split
        platform:
            - ``'this'`` = auto from current platform;
            - ``1`` = POSIX;
            - ``0`` = Windows/CMD
            - (other values reserved)

    Returns:
        list of argument strings

    Raises:
        ValueError: on an invalid or incomplete shell string
        AssertionError: on an unknown ``platform`` value
    """
    if platform == 'this':
        platform = (sys.platform != 'win32')  # RNC: includes 64-bit Windows
    # Fix: the regex literals were missing (bare `r`), making this function
    # raise NameError; restored from the original Stack Overflow answer.
    if platform == 1:  # POSIX
        re_cmd_lex = (
            r'''"((?:\\["\\]|[^"])*)"|'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])'''
            r'''|([^\s'"\\&|<>]+)|(\s+)|(.)''')
    elif platform == 0:  # Windows/CMD
        re_cmd_lex = (
            r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")'''
            r'''|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''')
    else:
        raise AssertionError('unknown platform %r' % platform)

    args = []
    accu = None  # collects pieces of one arg
    for qs, qss, esc, pipe, word, white, fail in re.findall(re_cmd_lex, s):
        if word:
            pass  # most frequent
        elif esc:
            word = esc[1]
        elif white or pipe:
            if accu is not None:
                args.append(accu)
            if pipe:
                args.append(pipe)
            accu = None
            continue
        elif fail:
            raise ValueError("invalid or incomplete shell string")
        elif qs:
            # Double-quoted string: unescape \" and \\.
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            if platform == 0:
                word = word.replace('""', '"')
        else:
            word = qss  # may be even empty; must be last
        accu = (accu or '') + word
    if accu is not None:
        args.append(accu)
    return args
| 729,882
|
Retrieves a string value from a parser.
Args:
parser: instance of :class:`ConfigParser`
section: section name within config file
option: option (variable) name within that section
default: value to return if option is absent
Returns:
string value
Raises:
ValueError: if the section is absent
|
def get_config_string_option(parser: ConfigParser,
                             section: str,
                             option: str,
                             default: str = None) -> str:
    """Retrieve a string value from a parser.

    Args:
        parser: instance of :class:`ConfigParser`
        section: section name within config file
        option: option (variable) name within that section
        default: value to return if option is absent

    Returns:
        string value

    Raises:
        ValueError: if the section is absent
    """
    if parser.has_section(section):
        return parser.get(section, option, fallback=default)
    raise ValueError("config missing section: " + section)
| 729,891
|
Reads config options and writes them as attributes of ``obj``, with
attribute names as per ``options``.
Args:
obj: the object to modify
parser: instance of :class:`ConfigParser`
section: section name within config file
options: option (variable) names within that section
default: value to use for any missing options
Returns:
|
def read_config_string_options(obj: Any,
                               parser: ConfigParser,
                               section: str,
                               options: Iterable[str],
                               default: str = None) -> None:
    """Read config options and set them as attributes of ``obj``.

    Attribute names are the option names themselves.

    Args:
        obj: the object to modify
        parser: instance of :class:`ConfigParser`
        section: section name within config file
        options: option (variable) names within that section
        default: value to use for any missing options
    """
    # ConfigParser always returns strings unless asked specifically, so
    # no extra string coercion is needed here.
    for option in options:
        value = get_config_string_option(parser, section, option,
                                         default=default)
        setattr(obj, option, value)
| 729,892
|
Retrieves a multi-line string value from a parser as a list of strings
(one per line, ignoring blank lines).
Args:
parser: instance of :class:`ConfigParser`
section: section name within config file
option: option (variable) name within that section
default: value to return if option is absent (``None`` is mapped to
``[]``)
Returns:
list of strings
Raises:
ValueError: if the section is absent
|
def get_config_multiline_option(parser: ConfigParser,
                                section: str,
                                option: str,
                                default: List[str] = None) -> List[str]:
    """Retrieve a multi-line option as a list of non-blank stripped lines.

    Args:
        parser: instance of :class:`ConfigParser`
        section: section name within config file
        option: option (variable) name within that section
        default: value to return if option is absent (``None`` is mapped
            to ``[]``)

    Returns:
        list of strings

    Raises:
        ValueError: if the section is absent
    """
    default = default or []
    if not parser.has_section(section):
        raise ValueError("config missing section: " + section)
    try:
        raw = parser.get(section, option)
    except NoOptionError:
        return default
    return [line.strip() for line in raw.splitlines() if line.strip()]
| 729,893
|
Retrieves a boolean value from a parser.
Args:
parser: instance of :class:`ConfigParser`
section: section name within config file
option: option (variable) name within that section
default: value to return if option is absent
Returns:
boolean value
Raises:
ValueError: if the section is absent
|
def get_config_bool_option(parser: ConfigParser,
                           section: str,
                           option: str,
                           default: bool = None) -> bool:
    """Retrieve a boolean value from a parser.

    Args:
        parser: instance of :class:`ConfigParser`
        section: section name within config file
        option: option (variable) name within that section
        default: value to return if option is absent

    Returns:
        boolean value

    Raises:
        ValueError: if the section is absent
    """
    if parser.has_section(section):
        return parser.getboolean(section, option, fallback=default)
    raise ValueError("config missing section: " + section)
| 729,895
|
Fetch parameter from ``configparser`` ``.INI`` file.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
fn: function to apply to string parameter (e.g. ``int``)
default: default value
Returns:
parameter value, or ``None`` if ``default is None``, or ``fn(default)``
|
def get_config_parameter(config: ConfigParser,
                         section: str,
                         param: str,
                         fn: Callable[[Any], Any],
                         default: Any) -> Any:
    """
    Fetch a parameter from a ``configparser`` ``.INI`` file and convert it
    with ``fn``.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        fn: function to apply to the string parameter (e.g. ``int``)
        default: default value

    Returns:
        the converted parameter value; or ``None`` if ``default is None``;
        or ``fn(default)``

    NOTE(review): a missing *section* raises ``NoSectionError`` from
    ``config.get``, which is not caught here and so propagates.
    """
    try:
        return fn(config.get(section, param))
    except (TypeError, ValueError, NoOptionError):
        # Fall back to the default, converted the same way (unless None).
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        return default if default is None else fn(default)
| 729,896
|
Get Boolean parameter from ``configparser`` ``.INI`` file.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
|
def get_config_parameter_boolean(config: ConfigParser,
                                 section: str,
                                 param: str,
                                 default: bool) -> bool:
    """
    Get a Boolean parameter from a ``configparser`` ``.INI`` file.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        default: default value

    Returns:
        parameter value, or ``default`` if absent or malformed
    """
    try:
        return config.getboolean(section, param)
    except (TypeError, ValueError, NoOptionError):
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        return default
| 729,897
|
Get ``loglevel`` parameter from ``configparser`` ``.INI`` file, e.g.
mapping ``'debug'`` to ``logging.DEBUG``.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
|
def get_config_parameter_loglevel(config: ConfigParser,
                                  section: str,
                                  param: str,
                                  default: int) -> int:
    """
    Get a ``loglevel`` parameter from a ``configparser`` ``.INI`` file,
    e.g. mapping ``'debug'`` to ``logging.DEBUG``.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        default: default value

    Returns:
        the corresponding ``logging`` level, or ``default`` if the option is
        absent or unrecognized
    """
    # Case-insensitive name -> logging level lookup table.
    level_by_name = {
        "debug": logging.DEBUG,  # 10
        "info": logging.INFO,
        "warn": logging.WARN,
        "warning": logging.WARN,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,  # 50
        "fatal": logging.CRITICAL,
    }
    try:
        name = config.get(section, param).lower()
        if name in level_by_name:
            return level_by_name[name]
        raise ValueError
    except (TypeError, ValueError, NoOptionError, AttributeError):
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        return default
| 729,898
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.